From 163abc9cda7ff9b092e204e2244351aaa3cc46e0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 29 May 2025 01:01:57 +0000
Subject: [PATCH 001/200] initial commit

---
 414 files changed, 61733 insertions(+), 1 deletion(-)

diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 00000000..ff261bad
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,9 @@
+ARG VARIANT="3.9"
+FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT}
+
+USER vscode
+
+RUN curl -sSf https://rye.astral.sh/get | RYE_VERSION="0.44.0" RYE_INSTALL_OPTION="--yes" bash
+ENV PATH=/home/vscode/.rye/shims:$PATH
+
+RUN echo "[[ -d .venv ]] && source .venv/bin/activate || export PATH=\$PATH" >> /home/vscode/.bashrc
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000..c17fdc16
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,43 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/debian
+{
+  "name": "Debian",
+  "build": {
+    "dockerfile": "Dockerfile",
+    "context": ".."
+  },
+
+  "postStartCommand": "rye sync --all-features",
+
+  "customizations": {
+    "vscode": {
+      "extensions": [
+        "ms-python.python"
+      ],
+      "settings": {
+        "terminal.integrated.shell.linux": "/bin/bash",
+        "python.pythonPath": ".venv/bin/python",
+        "python.defaultInterpreterPath": ".venv/bin/python",
+        "python.typeChecking": "basic",
+        "terminal.integrated.env.linux": {
+          "PATH": "/home/vscode/.rye/shims:${env:PATH}"
+        }
+      }
+    }
+  },
+  "features": {
+    "ghcr.io/devcontainers/features/node:1": {}
+  }
+
+  // Features to add to the dev container. More info: https://containers.dev/features.
+  // "features": {},
+
+  // Use 'forwardPorts' to make a list of ports inside the container available locally.
+  // "forwardPorts": [],
+
+  // Configure tool-specific properties.
+  // "customizations": {},
+
+  // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+  // "remoteUser": "root"
+}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..88980c93
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,76 @@
+name: CI
+on:
+  push:
+    branches-ignore:
+      - 'generated'
+      - 'codegen/**'
+      - 'integrated/**'
+      - 'stl-preview-head/**'
+      - 'stl-preview-base/**'
+
+jobs:
+  lint:
+    timeout-minutes: 10
+    name: lint
+    runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Install dependencies
+        run: rye sync --all-features
+
+      - name: Run lints
+        run: ./scripts/lint
+
+  upload:
+    if: github.repository == 'stainless-sdks/digitalocean-genai-sdk-python'
+    timeout-minutes: 10
+    name: upload
+    permissions:
+      contents: read
+      id-token: write
+    runs-on: depot-ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get GitHub OIDC Token
+        id: github-oidc
+        uses: actions/github-script@v6
+        with:
+          script: core.setOutput('github_token', await core.getIDToken());
+
+      - name: Upload tarball
+        env:
+          URL: https://pkg.stainless.com/s
+          AUTH: ${{ steps.github-oidc.outputs.github_token }}
+          SHA: ${{ github.sha }}
+        run: ./scripts/utils/upload-artifact.sh
+
+  test:
+    timeout-minutes: 10
+    name: test
+    runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Bootstrap
+        run: ./scripts/bootstrap
+
+      - name: Run tests
+        run: ./scripts/test
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..87797408
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+.prism.log
+.vscode
+_dev
+
+__pycache__
+.mypy_cache
+
+dist
+
+.venv
+.idea
+
+.env
+.envrc
+codegen.log
+Brewfile.lock.json
diff --git a/.python-version b/.python-version
new file mode 100644
index 00000000..43077b24
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.9.18
diff --git a/.stats.yml b/.stats.yml
new file mode 100644
index 00000000..84a850f9
--- /dev/null
+++ b/.stats.yml
@@ -0,0 +1,4 @@
+configured_endpoints: 126
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml
+openapi_spec_hash: 686329a97002025d118dc2367755c18d
+config_hash: 39a1554af43cd406e37b5ed5c943649c
diff --git a/Brewfile b/Brewfile
new file mode 100644
index 00000000..492ca37b
--- /dev/null
+++ b/Brewfile
@@ -0,0 +1,2 @@
+brew "rye"
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..79f5523c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,129 @@
+## Setting up the environment
+
+### With Rye
+
+We use [Rye](https://rye.astral.sh/) to manage dependencies because it will automatically provision a Python environment with the expected Python version. To set it up, run:
+
+```sh
+$ ./scripts/bootstrap
+```
+
+Or [install Rye manually](https://rye.astral.sh/guide/installation/) and run:
+
+```sh
+$ rye sync --all-features
+```
+
+You can then run scripts using `rye run python script.py` or by activating the virtual environment:
+
+```sh
+$ rye shell
+# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+$ source .venv/bin/activate
+
+# now you can omit the `rye run` prefix
+$ python script.py
+```
+
+### Without Rye
+
+Alternatively, if you don't want to install Rye, you can stick with the standard `pip` setup: make sure you have the Python version specified in `.python-version`, create a virtual environment however you prefer, and then install dependencies with this command:
+
+```sh
+$ pip install -r requirements-dev.lock
+```
+
+## Modifying/Adding code
+
+Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
+result in merge conflicts between manual patches and changes from the generator. The generator will never
+modify the contents of the `src/digitalocean_genai_sdk/lib/` and `examples/` directories.
+
+## Adding and running examples
+
+All files in the `examples/` directory are not modified by the generator and can be freely edited or added to.
+
+```py
+# add an example to examples/.py
+
+#!/usr/bin/env -S rye run python
+…
+```
+
+```sh
+$ chmod +x examples/.py
+# run the example against your api
+$ ./examples/.py
+```
+
+## Using the repository from source
+
+If you'd like to use the repository from source, you can either install from git or link to a cloned repository:
+
+To install via git:
+
+```sh
+$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git
+```
+
+Alternatively, you can build from source and install the wheel file:
+
+Building this package will create two files in the `dist/` directory: a `.tar.gz` containing the source files and a `.whl` that can be used to install the package efficiently.
+
+To create a distributable version of the library, all you have to do is run this command:
+
+```sh
+$ rye build
+# or
+$ python -m build
+```
+
+Then to install:
+
+```sh
+$ pip install ./path-to-wheel-file.whl
+```
+
+## Running tests
+
+Running most tests requires you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec.
+
+```sh
+# you will need npm installed
+$ npx prism mock path/to/your/openapi.yml
+```
+
+```sh
+$ ./scripts/test
+```
+
+## Linting and formatting
+
+This repository uses [ruff](https://github.com/astral-sh/ruff) and
+[black](https://github.com/psf/black) to lint and format the code.
+
+To lint:
+
+```sh
+$ ./scripts/lint
+```
+
+To format and fix all ruff issues automatically:
+
+```sh
+$ ./scripts/format
+```
+
+## Publishing and releases
+
+Changes made to this repository via the automated release PR pipeline should publish to PyPI automatically. If
+the changes aren't made through the automated pipeline, you may want to make releases manually.
+
+### Publish with a GitHub workflow
+
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up.
+
+### Publish manually
+
+If you need to manually release a package, you can run the `bin/publish-pypi` script with a `PYPI_TOKEN` set in
+the environment.
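+
+For example, a minimal sketch (the token value is hypothetical, and `bash` is used explicitly in case the script is not marked executable):
+
+```sh
+$ PYPI_TOKEN="pypi-..." bash ./bin/publish-pypi
+```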
diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..9c99266b --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright 2025 digitalocean-genai-sdk + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index b53f1288..b9fcd7e8 100644 --- a/README.md +++ b/README.md @@ -1 +1,384 @@ -# digitalocean-genai-sdk-python \ No newline at end of file +# Digitalocean Genai SDK Python API library + +[![PyPI version](https://img.shields.io/pypi/v/digitalocean_genai_sdk.svg)](https://pypi.org/project/digitalocean_genai_sdk/) + +The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ +application. The library includes type definitions for all request params and response fields, +and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). + +It is generated with [Stainless](https://www.stainless.com/). + +## Documentation + +The REST API documentation can be found on [help.openai.com](https://help.openai.com/). The full API of this library can be found in [api.md](api.md). + +## Installation + +```sh +# install from this staging repo +pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git +``` + +> [!NOTE] +> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk` + +## Usage + +The full API of this library can be found in [api.md](api.md). + +```python +import os +from digitalocean_genai_sdk import DigitaloceanGenaiSDK + +client = DigitaloceanGenaiSDK( + api_key=os.environ.get( + "DIGITALOCEAN_GENAI_SDK_API_KEY" + ), # This is the default and can be omitted +) + +assistants = client.assistants.list() +print(assistants.first_id) +``` + +While you can provide an `api_key` keyword argument, +we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) +to add `DIGITALOCEAN_GENAI_SDK_API_KEY="My API Key"` to your `.env` file +so that your API Key is not stored in source control. 
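+
+For example, a minimal sketch with python-dotenv (this assumes a `.env` file that defines `DIGITALOCEAN_GENAI_SDK_API_KEY`; `load_dotenv` comes from the `python-dotenv` package, not this SDK):
+
+```python
+from dotenv import load_dotenv
+
+from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+
+load_dotenv()  # reads .env and populates os.environ
+
+# No api_key argument needed: the client falls back to the
+# DIGITALOCEAN_GENAI_SDK_API_KEY environment variable.
+client = DigitaloceanGenaiSDK()
+```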
+ +## Async usage + +Simply import `AsyncDigitaloceanGenaiSDK` instead of `DigitaloceanGenaiSDK` and use `await` with each API call: + +```python +import os +import asyncio +from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK + +client = AsyncDigitaloceanGenaiSDK( + api_key=os.environ.get( + "DIGITALOCEAN_GENAI_SDK_API_KEY" + ), # This is the default and can be omitted +) + + +async def main() -> None: + assistants = await client.assistants.list() + print(assistants.first_id) + + +asyncio.run(main()) +``` + +Functionality between the synchronous and asynchronous clients is otherwise identical. + +## Using types + +Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: + +- Serializing back into JSON, `model.to_json()` +- Converting to a dictionary, `model.to_dict()` + +Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`. + +## Nested params + +Nested parameters are dictionaries, typed using `TypedDict`, for example: + +```python +from digitalocean_genai_sdk import DigitaloceanGenaiSDK + +client = DigitaloceanGenaiSDK() + +assistant_object = client.assistants.create( + model="gpt-4o", + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, +) +print(assistant_object.tool_resources) +``` + +## File uploads + +Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`. + +```python +from pathlib import Path +from digitalocean_genai_sdk import DigitaloceanGenaiSDK + +client = DigitaloceanGenaiSDK() + +client.audio.transcribe_audio( + file=Path("/path/to/file"), + model="gpt-4o-transcribe", +) +``` + +The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically. + +## Handling errors + +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised. + +When the API returns a non-success status code (that is, 4xx or 5xx +response), a subclass of `digitalocean_genai_sdk.APIStatusError` is raised, containing `status_code` and `response` properties. + +All errors inherit from `digitalocean_genai_sdk.APIError`. + +```python +import digitalocean_genai_sdk +from digitalocean_genai_sdk import DigitaloceanGenaiSDK + +client = DigitaloceanGenaiSDK() + +try: + client.assistants.list() +except digitalocean_genai_sdk.APIConnectionError as e: + print("The server could not be reached") + print(e.__cause__) # an underlying Exception, likely raised within httpx. 
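+    # Note: timeouts land in this branch too, since APITimeoutError is the
+    # subclass of APIConnectionError raised when a request times out.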
+except digitalocean_genai_sdk.RateLimitError as e:
+    print("A 429 status code was received; we should back off a bit.")
+except digitalocean_genai_sdk.APIStatusError as e:
+    print("Another non-200-range status code was received")
+    print(e.status_code)
+    print(e.response)
+```
+
+Error codes are as follows:
+
+| Status Code | Error Type                 |
+| ----------- | -------------------------- |
+| 400         | `BadRequestError`          |
+| 401         | `AuthenticationError`      |
+| 403         | `PermissionDeniedError`    |
+| 404         | `NotFoundError`            |
+| 422         | `UnprocessableEntityError` |
+| 429         | `RateLimitError`           |
+| >=500       | `InternalServerError`      |
+| N/A         | `APIConnectionError`       |
+
+### Retries
+
+Certain errors are automatically retried 2 times by default, with a short exponential backoff.
+Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
+429 Rate Limit, and >=500 Internal errors are all retried by default.
+
+You can use the `max_retries` option to configure or disable retry settings:
+
+```python
+from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+
+# Configure the default for all requests:
+client = DigitaloceanGenaiSDK(
+    # default is 2
+    max_retries=0,
+)
+
+# Or, configure per-request:
+client.with_options(max_retries=5).assistants.list()
+```
+
+### Timeouts
+
+By default, requests time out after 1 minute. You can configure this with a `timeout` option,
+which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+
+```python
+import httpx
+
+from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+
+# Configure the default for all requests:
+client = DigitaloceanGenaiSDK(
+    # 20 seconds (default is 1 minute)
+    timeout=20.0,
+)
+
+# More granular control:
+client = DigitaloceanGenaiSDK(
+    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
+)
+
+# Override per-request:
+client.with_options(timeout=5.0).assistants.list()
+```
+
+On timeout, an `APITimeoutError` is raised.
+
+Note that requests that time out are [retried twice by default](#retries).
+
+## Advanced
+
+### Logging
+
+We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
+
+You can enable logging by setting the environment variable `DIGITALOCEAN_GENAI_SDK_LOG` to `info`:
+
+```shell
+$ export DIGITALOCEAN_GENAI_SDK_LOG=info
+```
+
+Or set it to `debug` for more verbose logging.
+
+### How to tell whether `None` means `null` or missing
+
+In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:
+
+```py
+if response.my_field is None:
+    if 'my_field' not in response.model_fields_set:
+        print('Got json like {}, without a "my_field" key present at all.')
+    else:
+        print('Got json like {"my_field": null}.')
+```
+
+### Accessing raw response data (e.g. headers)
+
+The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
+
+```py
+from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+
+client = DigitaloceanGenaiSDK()
+response = client.assistants.with_raw_response.list()
+print(response.headers.get('X-My-Header'))
+
+assistant = response.parse()  # get the object that `assistants.list()` would have returned
+print(assistant.first_id)
+```
+
+These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object.
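+
+Other request metadata is available on the same object. As a small sketch (these attribute names are assumptions based on the response classes in `_response.py`; check that file for the exact surface):
+
+```py
+response = client.assistants.with_raw_response.list()
+
+print(response.status_code)  # e.g. 200
+print(response.url)          # the URL the request was sent to
+print(response.elapsed)      # time taken for the request/response cycle
+```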
+ +The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. + +#### `.with_streaming_response` + +The above interface eagerly reads the full response body when you make the request, which may not always be what you want. + +To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. + +```python +with client.assistants.with_streaming_response.list() as response: + print(response.headers.get("X-My-Header")) + + for line in response.iter_lines(): + print(line) +``` + +The context manager is required so that the response will reliably be closed. + +### Making custom/undocumented requests + +This library is typed for convenient access to the documented API. + +If you need to access undocumented endpoints, params, or response properties, the library can still be used. + +#### Undocumented endpoints + +To make requests to undocumented endpoints, you can make requests using `client.get`, `client.post`, and other +http verbs. Options on the client will be respected (such as retries) when making this request. + +```py +import httpx + +response = client.post( + "/foo", + cast_to=httpx.Response, + body={"my_param": True}, +) + +print(response.headers.get("x-foo")) +``` + +#### Undocumented request params + +If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` request +options. + +#### Undocumented response properties + +To access undocumented response properties, you can access the extra fields like `response.unknown_prop`. You +can also get all the extra fields on the Pydantic model as a dict with +[`response.model_extra`](https://docs.pydantic.dev/latest/api/base_model/#pydantic.BaseModel.model_extra). + +### Configuring the HTTP client + +You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including: + +- Support for [proxies](https://www.python-httpx.org/advanced/proxies/) +- Custom [transports](https://www.python-httpx.org/advanced/transports/) +- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality + +```python +import httpx +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, DefaultHttpxClient + +client = DigitaloceanGenaiSDK( + # Or use the `DIGITALOCEAN_GENAI_SDK_BASE_URL` env var + base_url="http://my.test.server.example.com:8083", + http_client=DefaultHttpxClient( + proxy="http://my.test.proxy.example.com", + transport=httpx.HTTPTransport(local_address="0.0.0.0"), + ), +) +``` + +You can also customize the client on a per-request basis by using `with_options()`: + +```python +client.with_options(http_client=DefaultHttpxClient(...)) +``` + +### Managing HTTP resources + +By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. 
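+
+For explicit closing, a sketch along these lines works:
+
+```py
+from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+
+client = DigitaloceanGenaiSDK()
+try:
+    client.assistants.list()
+finally:
+    client.close()  # releases the underlying HTTP connections
+```
+
+The context-manager form does the same cleanup automatically: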
+ +```py +from digitalocean_genai_sdk import DigitaloceanGenaiSDK + +with DigitaloceanGenaiSDK() as client: + # make requests here + ... + +# HTTP client is now closed +``` + +## Versioning + +This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: + +1. Changes that only affect static types, without breaking runtime behavior. +2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_ +3. Changes that we do not expect to impact the vast majority of users in practice. + +We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. + +We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions. + +### Determining the installed version + +If you've upgraded to the latest version but aren't seeing any new features you were expecting then your python environment is likely still using an older version. + +You can determine the version that is being used at runtime with: + +```py +import digitalocean_genai_sdk +print(digitalocean_genai_sdk.__version__) +``` + +## Requirements + +Python 3.8 or higher. + +## Contributing + +See [the contributing documentation](./CONTRIBUTING.md). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..d08f7996 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,23 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainless.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by Digitalocean Genai SDK, please follow the respective company's security reporting guidelines. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. 
diff --git a/api.md b/api.md new file mode 100644 index 00000000..daea5075 --- /dev/null +++ b/api.md @@ -0,0 +1,627 @@ +# Assistants + +Types: + +```python +from digitalocean_genai_sdk.types import ( + AssistantObject, + AssistantSupportedModels, + AssistantToolsCode, + AssistantToolsFileSearch, + AssistantToolsFunction, + AssistantsAPIResponseFormatOption, + FileSearchRanker, + FunctionObject, + ReasoningEffort, + AssistantListResponse, + AssistantDeleteResponse, +) +``` + +Methods: + +- client.assistants.create(\*\*params) -> AssistantObject +- client.assistants.retrieve(assistant_id) -> AssistantObject +- client.assistants.update(assistant_id, \*\*params) -> AssistantObject +- client.assistants.list(\*\*params) -> AssistantListResponse +- client.assistants.delete(assistant_id) -> AssistantDeleteResponse + +# Audio + +Types: + +```python +from digitalocean_genai_sdk.types import ( + TranscriptionSegment, + VoiceIDsShared, + AudioTranscribeAudioResponse, + AudioTranslateAudioResponse, +) +``` + +Methods: + +- client.audio.generate_speech(\*\*params) -> BinaryAPIResponse +- client.audio.transcribe_audio(\*\*params) -> AudioTranscribeAudioResponse +- client.audio.translate_audio(\*\*params) -> AudioTranslateAudioResponse + +# Batches + +Types: + +```python +from digitalocean_genai_sdk.types import Batch, BatchListResponse +``` + +Methods: + +- client.batches.create(\*\*params) -> Batch +- client.batches.retrieve(batch_id) -> Batch +- client.batches.list(\*\*params) -> BatchListResponse +- client.batches.cancel(batch_id) -> Batch + +# Chat + +## Completions + +Types: + +```python +from digitalocean_genai_sdk.types.chat import ( + CreateModelProperties, + CreateResponse, + MessageToolCall, + ModelIDsShared, + RequestMessageContentPartText, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, + ResponseFormatText, + ResponseMessage, + TokenLogprob, + Usage, + WebSearchContextSize, + WebSearchLocation, + CompletionListResponse, + CompletionDeleteResponse, + CompletionListMessagesResponse, +) +``` + +Methods: + +- client.chat.completions.create(\*\*params) -> CreateResponse +- client.chat.completions.retrieve(completion_id) -> CreateResponse +- client.chat.completions.update(completion_id, \*\*params) -> CreateResponse +- client.chat.completions.list(\*\*params) -> CompletionListResponse +- client.chat.completions.delete(completion_id) -> CompletionDeleteResponse +- client.chat.completions.list_messages(completion_id, \*\*params) -> CompletionListMessagesResponse + +# Completions + +Types: + +```python +from digitalocean_genai_sdk.types import ( + ChatCompletionStreamOptions, + StopConfiguration, + CompletionCreateResponse, +) +``` + +Methods: + +- client.completions.create(\*\*params) -> CompletionCreateResponse + +# Embeddings + +Types: + +```python +from digitalocean_genai_sdk.types import EmbeddingCreateResponse +``` + +Methods: + +- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse + +# Files + +Types: + +```python +from digitalocean_genai_sdk.types import ( + OpenAIFile, + FileListResponse, + FileDeleteResponse, + FileRetrieveContentResponse, +) +``` + +Methods: + +- client.files.retrieve(file_id) -> OpenAIFile +- client.files.list(\*\*params) -> FileListResponse +- client.files.delete(file_id) -> FileDeleteResponse +- client.files.retrieve_content(file_id) -> str +- client.files.upload(\*\*params) -> OpenAIFile + +# FineTuning + +## Checkpoints + +### Permissions + +Types: + +```python +from digitalocean_genai_sdk.types.fine_tuning.checkpoints import ( + 
ListFineTuningCheckpointPermission, + PermissionDeleteResponse, +) +``` + +Methods: + +- client.fine_tuning.checkpoints.permissions.create(permission_id, \*\*params) -> ListFineTuningCheckpointPermission +- client.fine_tuning.checkpoints.permissions.retrieve(permission_id, \*\*params) -> ListFineTuningCheckpointPermission +- client.fine_tuning.checkpoints.permissions.delete(permission_id) -> PermissionDeleteResponse + +## Jobs + +Types: + +```python +from digitalocean_genai_sdk.types.fine_tuning import FineTuneMethod, FineTuningJob, JobListResponse +``` + +Methods: + +- client.fine_tuning.jobs.create(\*\*params) -> FineTuningJob +- client.fine_tuning.jobs.retrieve(fine_tuning_job_id) -> FineTuningJob +- client.fine_tuning.jobs.list(\*\*params) -> JobListResponse +- client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob + +### Checkpoints + +Types: + +```python +from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse +``` + +Methods: + +- client.fine_tuning.jobs.checkpoints.retrieve(fine_tuning_job_id, \*\*params) -> CheckpointRetrieveResponse + +### Events + +Types: + +```python +from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse +``` + +Methods: + +- client.fine_tuning.jobs.events.retrieve(fine_tuning_job_id, \*\*params) -> EventRetrieveResponse + +# Images + +Types: + +```python +from digitalocean_genai_sdk.types import ImagesResponse +``` + +Methods: + +- client.images.create_edit(\*\*params) -> ImagesResponse +- client.images.create_generation(\*\*params) -> ImagesResponse +- client.images.create_variation(\*\*params) -> ImagesResponse + +# Models + +Types: + +```python +from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse +``` + +Methods: + +- client.models.retrieve(model) -> Model +- client.models.list() -> ModelListResponse +- client.models.delete(model) -> ModelDeleteResponse + +# Moderations + +Types: + +```python +from digitalocean_genai_sdk.types import ModerationClassifyResponse +``` + +Methods: + +- client.moderations.classify(\*\*params) -> ModerationClassifyResponse + +# Organization + +Types: + +```python +from digitalocean_genai_sdk.types import ( + AuditLogActorUser, + AuditLogEventType, + UsageResponse, + OrganizationListAuditLogsResponse, +) +``` + +Methods: + +- client.organization.get_costs(\*\*params) -> UsageResponse +- client.organization.list_audit_logs(\*\*params) -> OrganizationListAuditLogsResponse + +## AdminAPIKeys + +Types: + +```python +from digitalocean_genai_sdk.types.organization import ( + AdminAPIKey, + AdminAPIKeyListResponse, + AdminAPIKeyDeleteResponse, +) +``` + +Methods: + +- client.organization.admin_api_keys.create(\*\*params) -> AdminAPIKey +- client.organization.admin_api_keys.retrieve(key_id) -> AdminAPIKey +- client.organization.admin_api_keys.list(\*\*params) -> AdminAPIKeyListResponse +- client.organization.admin_api_keys.delete(key_id) -> AdminAPIKeyDeleteResponse + +## Invites + +Types: + +```python +from digitalocean_genai_sdk.types.organization import ( + Invite, + InviteListResponse, + InviteDeleteResponse, +) +``` + +Methods: + +- client.organization.invites.create(\*\*params) -> Invite +- client.organization.invites.retrieve(invite_id) -> Invite +- client.organization.invites.list(\*\*params) -> InviteListResponse +- client.organization.invites.delete(invite_id) -> InviteDeleteResponse + +## Projects + +Types: + +```python +from digitalocean_genai_sdk.types.organization import Project, ProjectListResponse +``` + +Methods: + +- 
client.organization.projects.create(\*\*params) -> Project +- client.organization.projects.retrieve(project_id) -> Project +- client.organization.projects.update(project_id, \*\*params) -> Project +- client.organization.projects.list(\*\*params) -> ProjectListResponse +- client.organization.projects.archive(project_id) -> Project + +### APIKeys + +Types: + +```python +from digitalocean_genai_sdk.types.organization.projects import ( + APIKey, + APIKeyListResponse, + APIKeyDeleteResponse, +) +``` + +Methods: + +- client.organization.projects.api_keys.retrieve(key_id, \*, project_id) -> APIKey +- client.organization.projects.api_keys.list(project_id, \*\*params) -> APIKeyListResponse +- client.organization.projects.api_keys.delete(key_id, \*, project_id) -> APIKeyDeleteResponse + +### RateLimits + +Types: + +```python +from digitalocean_genai_sdk.types.organization.projects import RateLimit, RateLimitListResponse +``` + +Methods: + +- client.organization.projects.rate_limits.update(rate_limit_id, \*, project_id, \*\*params) -> RateLimit +- client.organization.projects.rate_limits.list(project_id, \*\*params) -> RateLimitListResponse + +### ServiceAccounts + +Types: + +```python +from digitalocean_genai_sdk.types.organization.projects import ( + ServiceAccount, + ServiceAccountCreateResponse, + ServiceAccountListResponse, + ServiceAccountDeleteResponse, +) +``` + +Methods: + +- client.organization.projects.service_accounts.create(project_id, \*\*params) -> ServiceAccountCreateResponse +- client.organization.projects.service_accounts.retrieve(service_account_id, \*, project_id) -> ServiceAccount +- client.organization.projects.service_accounts.list(project_id, \*\*params) -> ServiceAccountListResponse +- client.organization.projects.service_accounts.delete(service_account_id, \*, project_id) -> ServiceAccountDeleteResponse + +### Users + +Types: + +```python +from digitalocean_genai_sdk.types.organization.projects import ( + ProjectUser, + UserListResponse, + UserDeleteResponse, +) +``` + +Methods: + +- client.organization.projects.users.retrieve(user_id, \*, project_id) -> ProjectUser +- client.organization.projects.users.update(user_id, \*, project_id, \*\*params) -> ProjectUser +- client.organization.projects.users.list(project_id, \*\*params) -> UserListResponse +- client.organization.projects.users.delete(user_id, \*, project_id) -> UserDeleteResponse +- client.organization.projects.users.add(project_id, \*\*params) -> ProjectUser + +## Usage + +Methods: + +- client.organization.usage.audio_speeches(\*\*params) -> UsageResponse +- client.organization.usage.audio_transcriptions(\*\*params) -> UsageResponse +- client.organization.usage.code_interpreter_sessions(\*\*params) -> UsageResponse +- client.organization.usage.completions(\*\*params) -> UsageResponse +- client.organization.usage.embeddings(\*\*params) -> UsageResponse +- client.organization.usage.images(\*\*params) -> UsageResponse +- client.organization.usage.moderations(\*\*params) -> UsageResponse +- client.organization.usage.vector_stores(\*\*params) -> UsageResponse + +## Users + +Types: + +```python +from digitalocean_genai_sdk.types.organization import ( + OrganizationUser, + UserListResponse, + UserDeleteResponse, +) +``` + +Methods: + +- client.organization.users.retrieve(user_id) -> OrganizationUser +- client.organization.users.update(user_id, \*\*params) -> OrganizationUser +- client.organization.users.list(\*\*params) -> UserListResponse +- client.organization.users.delete(user_id) -> UserDeleteResponse + +# Realtime + 
+Types: + +```python +from digitalocean_genai_sdk.types import ( + RealtimeCreateSessionResponse, + RealtimeCreateTranscriptionSessionResponse, +) +``` + +Methods: + +- client.realtime.create_session(\*\*params) -> RealtimeCreateSessionResponse +- client.realtime.create_transcription_session(\*\*params) -> RealtimeCreateTranscriptionSessionResponse + +# Responses + +Types: + +```python +from digitalocean_genai_sdk.types import ( + ComputerToolCall, + ComputerToolCallOutput, + ComputerToolCallSafetyCheck, + FileSearchToolCall, + FunctionToolCall, + FunctionToolCallOutput, + Includable, + InputContent, + InputMessage, + ModelResponseProperties, + OutputMessage, + ReasoningItem, + Response, + ResponseProperties, + WebSearchToolCall, + ResponseListInputItemsResponse, +) +``` + +Methods: + +- client.responses.create(\*\*params) -> Response +- client.responses.retrieve(response_id, \*\*params) -> Response +- client.responses.delete(response_id) -> None +- client.responses.list_input_items(response_id, \*\*params) -> ResponseListInputItemsResponse + +# Threads + +Types: + +```python +from digitalocean_genai_sdk.types import CreateThreadRequest, ThreadObject, ThreadDeleteResponse +``` + +Methods: + +- client.threads.create(\*\*params) -> ThreadObject +- client.threads.retrieve(thread_id) -> ThreadObject +- client.threads.update(thread_id, \*\*params) -> ThreadObject +- client.threads.delete(thread_id) -> ThreadDeleteResponse + +## Runs + +Types: + +```python +from digitalocean_genai_sdk.types.threads import ( + AssistantsAPIToolChoiceOption, + RunObject, + TruncationObject, + RunListResponse, +) +``` + +Methods: + +- client.threads.runs.create(\*\*params) -> RunObject +- client.threads.runs.retrieve(run_id, \*, thread_id) -> RunObject +- client.threads.runs.update(run_id, \*, thread_id, \*\*params) -> RunObject +- client.threads.runs.list(thread_id, \*\*params) -> RunListResponse +- client.threads.runs.cancel(run_id, \*, thread_id) -> RunObject +- client.threads.runs.create_run(thread_id, \*\*params) -> RunObject +- client.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> RunObject + +### Steps + +Types: + +```python +from digitalocean_genai_sdk.types.threads.runs import RunStepObject, StepListResponse +``` + +Methods: + +- client.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStepObject +- client.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> StepListResponse + +## Messages + +Types: + +```python +from digitalocean_genai_sdk.types.threads import ( + AssistantToolsFileSearchTypeOnly, + CreateMessageRequest, + MessageContentImageFileObject, + MessageContentImageURLObject, + MessageObject, + MessageListResponse, + MessageDeleteResponse, +) +``` + +Methods: + +- client.threads.messages.create(thread_id, \*\*params) -> MessageObject +- client.threads.messages.retrieve(message_id, \*, thread_id) -> MessageObject +- client.threads.messages.update(message_id, \*, thread_id, \*\*params) -> MessageObject +- client.threads.messages.list(thread_id, \*\*params) -> MessageListResponse +- client.threads.messages.delete(message_id, \*, thread_id) -> MessageDeleteResponse + +# Uploads + +Types: + +```python +from digitalocean_genai_sdk.types import Upload, UploadAddPartResponse +``` + +Methods: + +- client.uploads.create(\*\*params) -> Upload +- client.uploads.add_part(upload_id, \*\*params) -> UploadAddPartResponse +- client.uploads.cancel(upload_id) -> Upload +- client.uploads.complete(upload_id, \*\*params) -> Upload + +# VectorStores + +Types: 
+ +```python +from digitalocean_genai_sdk.types import ( + AutoChunkingStrategyRequestParam, + ComparisonFilter, + CompoundFilter, + StaticChunkingStrategy, + StaticChunkingStrategyRequestParam, + VectorStoreExpirationAfter, + VectorStoreObject, + VectorStoreListResponse, + VectorStoreDeleteResponse, + VectorStoreSearchResponse, +) +``` + +Methods: + +- client.vector_stores.create(\*\*params) -> VectorStoreObject +- client.vector_stores.retrieve(vector_store_id) -> VectorStoreObject +- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStoreObject +- client.vector_stores.list(\*\*params) -> VectorStoreListResponse +- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse +- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse + +## FileBatches + +Types: + +```python +from digitalocean_genai_sdk.types.vector_stores import ( + ChunkingStrategyRequestParam, + ListVectorStoreFilesResponse, + VectorStoreFileBatchObject, +) +``` + +Methods: + +- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatchObject +- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject +- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject +- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> ListVectorStoreFilesResponse + +## Files + +Types: + +```python +from digitalocean_genai_sdk.types.vector_stores import ( + VectorStoreFileObject, + FileDeleteResponse, + FileRetrieveContentResponse, +) +``` + +Methods: + +- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFileObject +- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFileObject +- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFileObject +- client.vector_stores.files.list(vector_store_id, \*\*params) -> ListVectorStoreFilesResponse +- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse +- client.vector_stores.files.retrieve_content(file_id, \*, vector_store_id) -> FileRetrieveContentResponse diff --git a/bin/publish-pypi b/bin/publish-pypi new file mode 100644 index 00000000..826054e9 --- /dev/null +++ b/bin/publish-pypi @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -eux +mkdir -p dist +rye build --clean +rye publish --yes --token=$PYPI_TOKEN diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 00000000..d8c73e93 --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..54f4282a --- /dev/null +++ b/mypy.ini @@ -0,0 +1,50 @@ +[mypy] +pretty = True +show_error_codes = True + +# Exclude _files.py because mypy isn't smart enough to apply +# the correct type narrowing and as this is an internal module +# it's fine to just use Pyright. +# +# We also exclude our `tests` as mypy doesn't always infer +# types correctly and Pyright will still catch any type errors. 
+exclude = ^(src/digitalocean_genai_sdk/_files\.py|_dev/.*\.py|tests/.*)$ + +strict_equality = True +implicit_reexport = True +check_untyped_defs = True +no_implicit_optional = True + +warn_return_any = True +warn_unreachable = True +warn_unused_configs = True + +# Turn these options off as it could cause conflicts +# with the Pyright options. +warn_unused_ignores = False +warn_redundant_casts = False + +disallow_any_generics = True +disallow_untyped_defs = True +disallow_untyped_calls = True +disallow_subclassing_any = True +disallow_incomplete_defs = True +disallow_untyped_decorators = True +cache_fine_grained = True + +# By default, mypy reports an error if you assign a value to the result +# of a function call that doesn't return anything. We do this in our test +# cases: +# ``` +# result = ... +# assert result is None +# ``` +# Changing this codegen to make mypy happy would increase complexity +# and would not be worth it. +disable_error_code = func-returns-value,overload-cannot-match + +# https://github.com/python/mypy/issues/12162 +[mypy.overrides] +module = "black.files.*" +ignore_errors = true +ignore_missing_imports = true diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 00000000..53bca7ff --- /dev/null +++ b/noxfile.py @@ -0,0 +1,9 @@ +import nox + + +@nox.session(reuse_venv=True, name="test-pydantic-v1") +def test_pydantic_v1(session: nox.Session) -> None: + session.install("-r", "requirements-dev.lock") + session.install("pydantic<2") + + session.run("pytest", "--showlocals", "--ignore=tests/functional", *session.posargs) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..33ffc05d --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,207 @@ +[project] +name = "digitalocean_genai_sdk" +version = "0.0.1-alpha.0" +description = "The official Python library for the digitalocean-genai-sdk API" +dynamic = ["readme"] +license = "MIT" +authors = [ +{ name = "Digitalocean Genai SDK", email = "" }, +] +dependencies = [ + "httpx>=0.23.0, <1", + "pydantic>=1.9.0, <3", + "typing-extensions>=4.10, <5", + "anyio>=3.5.0, <5", + "distro>=1.7.0, <2", + "sniffio", +] +requires-python = ">= 3.8" +classifiers = [ + "Typing :: Typed", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: OS Independent", + "Operating System :: POSIX", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Topic :: Software Development :: Libraries :: Python Modules", + "License :: OSI Approved :: MIT License" +] + +[project.urls] +Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" +Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" + + +[tool.rye] +managed = true +# version pins are in requirements-dev.lock +dev-dependencies = [ + "pyright==1.1.399", + "mypy", + "respx", + "pytest", + "pytest-asyncio", + "ruff", + "time-machine", + "nox", + "dirty-equals>=0.6.0", + "importlib-metadata>=6.7.0", + "rich>=13.7.1", + "nest_asyncio==1.6.0", +] + +[tool.rye.scripts] +format = { chain = [ + "format:ruff", + "format:docs", + "fix:ruff", + # run formatting again to fix any inconsistencies when imports are stripped + "format:ruff", +]} +"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md" +"format:ruff" = "ruff format" + +"lint" = { 
chain = [ + "check:ruff", + "typecheck", + "check:importable", +]} +"check:ruff" = "ruff check ." +"fix:ruff" = "ruff check --fix ." + +"check:importable" = "python -c 'import digitalocean_genai_sdk'" + +typecheck = { chain = [ + "typecheck:pyright", + "typecheck:mypy" +]} +"typecheck:pyright" = "pyright" +"typecheck:verify-types" = "pyright --verifytypes digitalocean_genai_sdk --ignoreexternal" +"typecheck:mypy" = "mypy ." + +[build-system] +requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] +build-backend = "hatchling.build" + +[tool.hatch.build] +include = [ + "src/*" +] + +[tool.hatch.build.targets.wheel] +packages = ["src/digitalocean_genai_sdk"] + +[tool.hatch.build.targets.sdist] +# Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) +include = [ + "/*.toml", + "/*.json", + "/*.lock", + "/*.md", + "/mypy.ini", + "/noxfile.py", + "bin/*", + "examples/*", + "src/*", + "tests/*", +] + +[tool.hatch.metadata.hooks.fancy-pypi-readme] +content-type = "text/markdown" + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.fragments]] +path = "README.md" + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] +# replace relative links with absolute links +pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' +replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)' + +[tool.pytest.ini_options] +testpaths = ["tests"] +addopts = "--tb=short" +xfail_strict = true +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "session" +filterwarnings = [ + "error" +] + +[tool.pyright] +# this enables practically every flag given by pyright. +# there are a couple of flags that are still disabled by +# default in strict mode as they are experimental and niche. 
+typeCheckingMode = "strict" +pythonVersion = "3.8" + +exclude = [ + "_dev", + ".venv", + ".nox", +] + +reportImplicitOverride = true +reportOverlappingOverload = false + +reportImportCycles = false +reportPrivateUsage = false + +[tool.ruff] +line-length = 120 +output-format = "grouped" +target-version = "py37" + +[tool.ruff.format] +docstring-code-format = true + +[tool.ruff.lint] +select = [ + # isort + "I", + # bugbear rules + "B", + # remove unused imports + "F401", + # bare except statements + "E722", + # unused arguments + "ARG", + # print statements + "T201", + "T203", + # misuse of typing.TYPE_CHECKING + "TC004", + # import rules + "TID251", +] +ignore = [ + # mutable defaults + "B006", +] +unfixable = [ + # disable auto fix for print statements + "T201", + "T203", +] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" + +[tool.ruff.lint.isort] +length-sort = true +length-sort-straight = true +combine-as-imports = true +extra-standard-library = ["typing_extensions"] +known-first-party = ["digitalocean_genai_sdk", "tests"] + +[tool.ruff.lint.per-file-ignores] +"bin/**.py" = ["T201", "T203"] +"scripts/**.py" = ["T201", "T203"] +"tests/**.py" = ["T201", "T203"] +"examples/**.py" = ["T201", "T203"] diff --git a/requirements-dev.lock b/requirements-dev.lock new file mode 100644 index 00000000..bf449af3 --- /dev/null +++ b/requirements-dev.lock @@ -0,0 +1,104 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. 
+annotated-types==0.6.0 + # via pydantic +anyio==4.4.0 + # via digitalocean-genai-sdk + # via httpx +argcomplete==3.1.2 + # via nox +certifi==2023.7.22 + # via httpcore + # via httpx +colorlog==6.7.0 + # via nox +dirty-equals==0.6.0 +distlib==0.3.7 + # via virtualenv +distro==1.8.0 + # via digitalocean-genai-sdk +exceptiongroup==1.2.2 + # via anyio + # via pytest +filelock==3.12.4 + # via virtualenv +h11==0.14.0 + # via httpcore +httpcore==1.0.2 + # via httpx +httpx==0.28.1 + # via digitalocean-genai-sdk + # via respx +idna==3.4 + # via anyio + # via httpx +importlib-metadata==7.0.0 +iniconfig==2.0.0 + # via pytest +markdown-it-py==3.0.0 + # via rich +mdurl==0.1.2 + # via markdown-it-py +mypy==1.14.1 +mypy-extensions==1.0.0 + # via mypy +nest-asyncio==1.6.0 +nodeenv==1.8.0 + # via pyright +nox==2023.4.22 +packaging==23.2 + # via nox + # via pytest +platformdirs==3.11.0 + # via virtualenv +pluggy==1.5.0 + # via pytest +pydantic==2.10.3 + # via digitalocean-genai-sdk +pydantic-core==2.27.1 + # via pydantic +pygments==2.18.0 + # via rich +pyright==1.1.399 +pytest==8.3.3 + # via pytest-asyncio +pytest-asyncio==0.24.0 +python-dateutil==2.8.2 + # via time-machine +pytz==2023.3.post1 + # via dirty-equals +respx==0.22.0 +rich==13.7.1 +ruff==0.9.4 +setuptools==68.2.2 + # via nodeenv +six==1.16.0 + # via python-dateutil +sniffio==1.3.0 + # via anyio + # via digitalocean-genai-sdk +time-machine==2.9.0 +tomli==2.0.2 + # via mypy + # via pytest +typing-extensions==4.12.2 + # via anyio + # via digitalocean-genai-sdk + # via mypy + # via pydantic + # via pydantic-core + # via pyright +virtualenv==20.24.5 + # via nox +zipp==3.17.0 + # via importlib-metadata diff --git a/requirements.lock b/requirements.lock new file mode 100644 index 00000000..e655776d --- /dev/null +++ b/requirements.lock @@ -0,0 +1,45 @@ +# generated by rye +# use `rye lock` or `rye sync` to update this lockfile +# +# last locked with the following flags: +# pre: false +# features: [] +# all-features: true +# with-sources: false +# generate-hashes: false +# universal: false + +-e file:. +annotated-types==0.6.0 + # via pydantic +anyio==4.4.0 + # via digitalocean-genai-sdk + # via httpx +certifi==2023.7.22 + # via httpcore + # via httpx +distro==1.8.0 + # via digitalocean-genai-sdk +exceptiongroup==1.2.2 + # via anyio +h11==0.14.0 + # via httpcore +httpcore==1.0.2 + # via httpx +httpx==0.28.1 + # via digitalocean-genai-sdk +idna==3.4 + # via anyio + # via httpx +pydantic==2.10.3 + # via digitalocean-genai-sdk +pydantic-core==2.27.1 + # via pydantic +sniffio==1.3.0 + # via anyio + # via digitalocean-genai-sdk +typing-extensions==4.12.2 + # via anyio + # via digitalocean-genai-sdk + # via pydantic + # via pydantic-core diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 00000000..e84fe62c --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then + brew bundle check >/dev/null 2>&1 || { + echo "==> Installing Homebrew dependencies…" + brew bundle + } +fi + +echo "==> Installing Python dependencies…" + +# experimental uv support makes installations significantly faster +rye config --set-bool behavior.use-uv=true + +rye sync --all-features diff --git a/scripts/format b/scripts/format new file mode 100755 index 00000000..667ec2d7 --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+ +echo "==> Running formatters" +rye run format diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 00000000..3f725f2d --- /dev/null +++ b/scripts/lint @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running lints" +rye run lint + +echo "==> Making sure it imports" +rye run python -c 'import digitalocean_genai_sdk' diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 00000000..d2814ae6 --- /dev/null +++ b/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [[ -n "$1" && "$1" != '--'* ]]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +echo "==> Starting mock server with URL ${URL}" + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + + # Wait for server to come online + echo -n "Waiting for server" + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" +fi diff --git a/scripts/test b/scripts/test new file mode 100755 index 00000000..2b878456 --- /dev/null +++ b/scripts/test @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +export DEFER_PYDANTIC_BUILD=false + +echo "==> Running tests" +rye run pytest "$@" + +echo "==> Running Pydantic v1 tests" +rye run nox -s test-pydantic-v1 -- "$@" diff --git a/scripts/utils/ruffen-docs.py b/scripts/utils/ruffen-docs.py new file mode 100644 index 00000000..0cf2bd2f --- /dev/null +++ b/scripts/utils/ruffen-docs.py @@ -0,0 +1,167 @@ +# fork of https://github.com/asottile/blacken-docs adapted for ruff +from __future__ import annotations + +import re +import sys +import argparse +import textwrap +import contextlib +import subprocess +from typing import Match, Optional, Sequence, Generator, NamedTuple, cast + +MD_RE = re.compile( + r"(?P^(?P *)```\s*python\n)" r"(?P.*?)" r"(?P^(?P=indent)```\s*$)", + re.DOTALL | re.MULTILINE, +) +MD_PYCON_RE = re.compile( + r"(?P^(?P *)```\s*pycon\n)" r"(?P.*?)" r"(?P^(?P=indent)```.*$)", + re.DOTALL | re.MULTILINE, +) +PYCON_PREFIX = ">>> " +PYCON_CONTINUATION_PREFIX = "..." +PYCON_CONTINUATION_RE = re.compile( + rf"^{re.escape(PYCON_CONTINUATION_PREFIX)}( |$)", +) +DEFAULT_LINE_LENGTH = 100 + + +class CodeBlockError(NamedTuple): + offset: int + exc: Exception + + +def format_str( + src: str, +) -> tuple[str, Sequence[CodeBlockError]]: + errors: list[CodeBlockError] = [] + + @contextlib.contextmanager + def _collect_error(match: Match[str]) -> Generator[None, None, None]: + try: + yield + except Exception as e: + errors.append(CodeBlockError(match.start(), e)) + + def _md_match(match: Match[str]) -> str: + code = textwrap.dedent(match["code"]) + with _collect_error(match): + code = format_code_block(code) + code = textwrap.indent(code, match["indent"]) + return f"{match['before']}{code}{match['after']}" + + def _pycon_match(match: Match[str]) -> str: + code = "" + fragment = cast(Optional[str], None) + + def finish_fragment() -> None: + nonlocal code + nonlocal fragment + + if fragment is not None: + with _collect_error(match): + fragment = format_code_block(fragment) + fragment_lines = fragment.splitlines() + code += f"{PYCON_PREFIX}{fragment_lines[0]}\n" + for line in fragment_lines[1:]: + # Skip blank lines to handle Black adding a blank above + # functions within blocks. A blank line would end the REPL + # continuation prompt. + # + # >>> if True: + # ... def f(): + # ... pass + # ... 
+ if line: + code += f"{PYCON_CONTINUATION_PREFIX} {line}\n" + if fragment_lines[-1].startswith(" "): + code += f"{PYCON_CONTINUATION_PREFIX}\n" + fragment = None + + indentation = None + for line in match["code"].splitlines(): + orig_line, line = line, line.lstrip() + if indentation is None and line: + indentation = len(orig_line) - len(line) + continuation_match = PYCON_CONTINUATION_RE.match(line) + if continuation_match and fragment is not None: + fragment += line[continuation_match.end() :] + "\n" + else: + finish_fragment() + if line.startswith(PYCON_PREFIX): + fragment = line[len(PYCON_PREFIX) :] + "\n" + else: + code += orig_line[indentation:] + "\n" + finish_fragment() + return code + + def _md_pycon_match(match: Match[str]) -> str: + code = _pycon_match(match) + code = textwrap.indent(code, match["indent"]) + return f"{match['before']}{code}{match['after']}" + + src = MD_RE.sub(_md_match, src) + src = MD_PYCON_RE.sub(_md_pycon_match, src) + return src, errors + + +def format_code_block(code: str) -> str: + return subprocess.check_output( + [ + sys.executable, + "-m", + "ruff", + "format", + "--stdin-filename=script.py", + f"--line-length={DEFAULT_LINE_LENGTH}", + ], + encoding="utf-8", + input=code, + ) + + +def format_file( + filename: str, + skip_errors: bool, +) -> int: + with open(filename, encoding="UTF-8") as f: + contents = f.read() + new_contents, errors = format_str(contents) + for error in errors: + lineno = contents[: error.offset].count("\n") + 1 + print(f"{filename}:{lineno}: code block parse error {error.exc}") + if errors and not skip_errors: + return 1 + if contents != new_contents: + print(f"{filename}: Rewriting...") + with open(filename, "w", encoding="UTF-8") as f: + f.write(new_contents) + return 0 + else: + return 0 + + +def main(argv: Sequence[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--line-length", + type=int, + default=DEFAULT_LINE_LENGTH, + ) + parser.add_argument( + "-S", + "--skip-string-normalization", + action="store_true", + ) + parser.add_argument("-E", "--skip-errors", action="store_true") + parser.add_argument("filenames", nargs="*") + args = parser.parse_args(argv) + + retv = 0 + for filename in args.filenames: + retv |= format_file(filename, skip_errors=args.skip_errors) + return retv + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 00000000..c1019559 --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/digitalocean-genai-sdk-python/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/src/digitalocean_genai_sdk/__init__.py b/src/digitalocean_genai_sdk/__init__.py new file mode 100644 index 00000000..fc240d83 --- /dev/null +++ b/src/digitalocean_genai_sdk/__init__.py @@ -0,0 +1,99 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import typing as _t + +from . import types +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes +from ._utils import file_from_path +from ._client import ( + Client, + Stream, + Timeout, + Transport, + AsyncClient, + AsyncStream, + RequestOptions, + DigitaloceanGenaiSDK, + AsyncDigitaloceanGenaiSDK, +) +from ._models import BaseModel +from ._version import __title__, __version__ +from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse +from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS +from ._exceptions import ( + APIError, + ConflictError, + NotFoundError, + APIStatusError, + RateLimitError, + APITimeoutError, + BadRequestError, + APIConnectionError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, + DigitaloceanGenaiSDKError, + APIResponseValidationError, +) +from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._utils._logs import setup_logging as _setup_logging + +__all__ = [ + "types", + "__version__", + "__title__", + "NoneType", + "Transport", + "ProxiesTypes", + "NotGiven", + "NOT_GIVEN", + "Omit", + "DigitaloceanGenaiSDKError", + "APIError", + "APIStatusError", + "APITimeoutError", + "APIConnectionError", + "APIResponseValidationError", + "BadRequestError", + "AuthenticationError", + "PermissionDeniedError", + "NotFoundError", + "ConflictError", + "UnprocessableEntityError", + "RateLimitError", + "InternalServerError", + "Timeout", + "RequestOptions", + "Client", + "AsyncClient", + "Stream", + "AsyncStream", + "DigitaloceanGenaiSDK", + "AsyncDigitaloceanGenaiSDK", + "file_from_path", + "BaseModel", + "DEFAULT_TIMEOUT", + "DEFAULT_MAX_RETRIES", + "DEFAULT_CONNECTION_LIMITS", + "DefaultHttpxClient", + "DefaultAsyncHttpxClient", +] + +if not _t.TYPE_CHECKING: + from ._utils._resources_proxy import resources as resources + +_setup_logging() + +# Update the __module__ attribute for exported symbols so that +# error messages point to this module instead of the module +# it was originally defined in, e.g. +# digitalocean_genai_sdk._exceptions.NotFoundError -> digitalocean_genai_sdk.NotFoundError +__locals = locals() +for __name in __all__: + if not __name.startswith("__"): + try: + __locals[__name].__module__ = "digitalocean_genai_sdk" + except (TypeError, AttributeError): + # Some of our exported symbols are builtins which we can't set attributes for. 
+ pass diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/digitalocean_genai_sdk/_base_client.py new file mode 100644 index 00000000..9f58b2f9 --- /dev/null +++ b/src/digitalocean_genai_sdk/_base_client.py @@ -0,0 +1,1943 @@ +from __future__ import annotations + +import sys +import json +import time +import uuid +import email +import asyncio +import inspect +import logging +import platform +import email.utils +from types import TracebackType +from random import random +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Type, + Union, + Generic, + Mapping, + TypeVar, + Iterable, + Iterator, + Optional, + Generator, + AsyncIterator, + cast, + overload, +) +from typing_extensions import Literal, override, get_origin + +import anyio +import httpx +import distro +import pydantic +from httpx import URL +from pydantic import PrivateAttr + +from . import _exceptions +from ._qs import Querystring +from ._files import to_httpx_files, async_to_httpx_files +from ._types import ( + NOT_GIVEN, + Body, + Omit, + Query, + Headers, + Timeout, + NotGiven, + ResponseT, + AnyMapping, + PostParser, + RequestFiles, + HttpxSendArgs, + RequestOptions, + HttpxRequestFiles, + ModelBuilderProtocol, +) +from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping +from ._compat import PYDANTIC_V2, model_copy, model_dump +from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type +from ._response import ( + APIResponse, + BaseAPIResponse, + AsyncAPIResponse, + extract_response_type, +) +from ._constants import ( + DEFAULT_TIMEOUT, + MAX_RETRY_DELAY, + DEFAULT_MAX_RETRIES, + INITIAL_RETRY_DELAY, + RAW_RESPONSE_HEADER, + OVERRIDE_CAST_TO_HEADER, + DEFAULT_CONNECTION_LIMITS, +) +from ._streaming import Stream, SSEDecoder, AsyncStream, SSEBytesDecoder +from ._exceptions import ( + APIStatusError, + APITimeoutError, + APIConnectionError, + APIResponseValidationError, +) + +log: logging.Logger = logging.getLogger(__name__) + +# TODO: make base page type vars covariant +SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") +AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]") + + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +_StreamT = TypeVar("_StreamT", bound=Stream[Any]) +_AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any]) + +if TYPE_CHECKING: + from httpx._config import ( + DEFAULT_TIMEOUT_CONFIG, # pyright: ignore[reportPrivateImportUsage] + ) + + HTTPX_DEFAULT_TIMEOUT = DEFAULT_TIMEOUT_CONFIG +else: + try: + from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT + except ImportError: + # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366 + HTTPX_DEFAULT_TIMEOUT = Timeout(5.0) + + +class PageInfo: + """Stores the necessary information to build the request to retrieve the next page. + + Either `url` or `params` must be set. + """ + + url: URL | NotGiven + params: Query | NotGiven + json: Body | NotGiven + + @overload + def __init__( + self, + *, + url: URL, + ) -> None: ... + + @overload + def __init__( + self, + *, + params: Query, + ) -> None: ... + + @overload + def __init__( + self, + *, + json: Body, + ) -> None: ... 
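+
+    # The overloads above constrain callers to pass exactly one of `url`,
+    # `params`, or `json`; the initializer below simply records whichever
+    # values were provided.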
+ + def __init__( + self, + *, + url: URL | NotGiven = NOT_GIVEN, + json: Body | NotGiven = NOT_GIVEN, + params: Query | NotGiven = NOT_GIVEN, + ) -> None: + self.url = url + self.json = json + self.params = params + + @override + def __repr__(self) -> str: + if self.url: + return f"{self.__class__.__name__}(url={self.url})" + if self.json: + return f"{self.__class__.__name__}(json={self.json})" + return f"{self.__class__.__name__}(params={self.params})" + + +class BasePage(GenericModel, Generic[_T]): + """ + Defines the core interface for pagination. + + Type Args: + ModelT: The pydantic model that represents an item in the response. + + Methods: + has_next_page(): Check if there is another page available + next_page_info(): Get the necessary information to make a request for the next page + """ + + _options: FinalRequestOptions = PrivateAttr() + _model: Type[_T] = PrivateAttr() + + def has_next_page(self) -> bool: + items = self._get_page_items() + if not items: + return False + return self.next_page_info() is not None + + def next_page_info(self) -> Optional[PageInfo]: ... + + def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] + ... + + def _params_from_url(self, url: URL) -> httpx.QueryParams: + # TODO: do we have to preprocess params here? + return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params) + + def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: + options = model_copy(self._options) + options._strip_raw_response_header() + + if not isinstance(info.params, NotGiven): + options.params = {**options.params, **info.params} + return options + + if not isinstance(info.url, NotGiven): + params = self._params_from_url(info.url) + url = info.url.copy_with(params=params) + options.params = dict(url.params) + options.url = str(url) + return options + + if not isinstance(info.json, NotGiven): + if not is_mapping(info.json): + raise TypeError("Pagination is only supported with mappings") + + if not options.json_data: + options.json_data = {**info.json} + else: + if not is_mapping(options.json_data): + raise TypeError("Pagination is only supported with mappings") + + options.json_data = {**options.json_data, **info.json} + return options + + raise ValueError("Unexpected PageInfo state") + + +class BaseSyncPage(BasePage[_T], Generic[_T]): + _client: SyncAPIClient = pydantic.PrivateAttr() + + def _set_private_attributes( + self, + client: SyncAPIClient, + model: Type[_T], + options: FinalRequestOptions, + ) -> None: + if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + + self._model = model + self._client = client + self._options = options + + # Pydantic uses a custom `__iter__` method to support casting BaseModels + # to dictionaries. e.g. dict(model). + # As we want to support `for item in page`, this is inherently incompatible + # with the default pydantic behaviour. It is not possible to support both + # use cases at once. Fortunately, this is not a big deal as all other pydantic + # methods should continue to work as expected as there is an alternative method + # to cast a model to a dictionary, model.dict(), which is used internally + # by pydantic. 
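+    # In practice, iterating a page therefore yields the *items*, transparently
+    # fetching further pages as needed (resource names below are hypothetical):
+    #
+    #     for job in client.fine_tuning.jobs.list():
+    #         print(job.id)
+    #
+    # while `model.dict()` / `model.model_dump()` remain the way to get a dict.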
+ def __iter__(self) -> Iterator[_T]: # type: ignore + for page in self.iter_pages(): + for item in page._get_page_items(): + yield item + + def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: + page = self + while True: + yield page + if page.has_next_page(): + page = page.get_next_page() + else: + return + + def get_next_page(self: SyncPageT) -> SyncPageT: + info = self.next_page_info() + if not info: + raise RuntimeError( + "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." + ) + + options = self._info_to_options(info) + return self._client._request_api_list(self._model, page=self.__class__, options=options) + + +class AsyncPaginator(Generic[_T, AsyncPageT]): + def __init__( + self, + client: AsyncAPIClient, + options: FinalRequestOptions, + page_cls: Type[AsyncPageT], + model: Type[_T], + ) -> None: + self._model = model + self._client = client + self._options = options + self._page_cls = page_cls + + def __await__(self) -> Generator[Any, None, AsyncPageT]: + return self._get_page().__await__() + + async def _get_page(self) -> AsyncPageT: + def _parser(resp: AsyncPageT) -> AsyncPageT: + resp._set_private_attributes( + model=self._model, + options=self._options, + client=self._client, + ) + return resp + + self._options.post_parser = _parser + + return await self._client.request(self._page_cls, self._options) + + async def __aiter__(self) -> AsyncIterator[_T]: + # https://github.com/microsoft/pyright/issues/3464 + page = cast( + AsyncPageT, + await self, # type: ignore + ) + async for item in page: + yield item + + +class BaseAsyncPage(BasePage[_T], Generic[_T]): + _client: AsyncAPIClient = pydantic.PrivateAttr() + + def _set_private_attributes( + self, + model: Type[_T], + client: AsyncAPIClient, + options: FinalRequestOptions, + ) -> None: + if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + self.__pydantic_private__ = {} + + self._model = model + self._client = client + self._options = options + + async def __aiter__(self) -> AsyncIterator[_T]: + async for page in self.iter_pages(): + for item in page._get_page_items(): + yield item + + async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]: + page = self + while True: + yield page + if page.has_next_page(): + page = await page.get_next_page() + else: + return + + async def get_next_page(self: AsyncPageT) -> AsyncPageT: + info = self.next_page_info() + if not info: + raise RuntimeError( + "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." 
+ ) + + options = self._info_to_options(info) + return await self._client._request_api_list(self._model, page=self.__class__, options=options) + + +_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient]) +_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]]) + + +class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): + _client: _HttpxClientT + _version: str + _base_url: URL + max_retries: int + timeout: Union[float, Timeout, None] + _strict_response_validation: bool + _idempotency_header: str | None + _default_stream_cls: type[_DefaultStreamT] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + _strict_response_validation: bool, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None = DEFAULT_TIMEOUT, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + ) -> None: + self._version = version + self._base_url = self._enforce_trailing_slash(URL(base_url)) + self.max_retries = max_retries + self.timeout = timeout + self._custom_headers = custom_headers or {} + self._custom_query = custom_query or {} + self._strict_response_validation = _strict_response_validation + self._idempotency_header = None + self._platform: Platform | None = None + + if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] + raise TypeError( + "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `digitalocean_genai_sdk.DEFAULT_MAX_RETRIES`" + ) + + def _enforce_trailing_slash(self, url: URL) -> URL: + if url.raw_path.endswith(b"/"): + return url + return url.copy_with(raw_path=url.raw_path + b"/") + + def _make_status_error_from_response( + self, + response: httpx.Response, + ) -> APIStatusError: + if response.is_closed and not response.is_stream_consumed: + # We can't read the response body as it has been closed + # before it was read. This can happen if an event hook + # raises a status error. + body = None + err_msg = f"Error code: {response.status_code}" + else: + err_text = response.text.strip() + body = err_text + + try: + body = json.loads(err_text) + err_msg = f"Error code: {response.status_code} - {body}" + except Exception: + err_msg = err_text or f"Error code: {response.status_code}" + + return self._make_status_error(err_msg, body=body, response=response) + + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> _exceptions.APIStatusError: + raise NotImplementedError() + + def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers: + custom_headers = options.headers or {} + headers_dict = _merge_mappings(self.default_headers, custom_headers) + self._validate_headers(headers_dict, custom_headers) + + # headers are case-insensitive while dictionaries are not. + headers = httpx.Headers(headers_dict) + + idempotency_header = self._idempotency_header + if idempotency_header and options.idempotency_key and idempotency_header not in headers: + headers[idempotency_header] = options.idempotency_key + + # Don't set these headers if they were already set or removed by the caller. We check + # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case. 
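+        # For example, a caller could suppress the retry-count header on a single
+        # call with something like (method name hypothetical; `Omit` is exported
+        # from the package root):
+        #
+        #     client.models.list(extra_headers={"x-stainless-retry-count": Omit()})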
+ lower_custom_headers = [header.lower() for header in custom_headers] + if "x-stainless-retry-count" not in lower_custom_headers: + headers["x-stainless-retry-count"] = str(retries_taken) + if "x-stainless-read-timeout" not in lower_custom_headers: + timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout + if isinstance(timeout, Timeout): + timeout = timeout.read + if timeout is not None: + headers["x-stainless-read-timeout"] = str(timeout) + + return headers + + def _prepare_url(self, url: str) -> URL: + """ + Merge a URL argument together with any 'base_url' on the client, + to create the URL used for the outgoing request. + """ + # Copied from httpx's `_merge_url` method. + merge_url = URL(url) + if merge_url.is_relative_url: + merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/") + return self.base_url.copy_with(raw_path=merge_raw_path) + + return merge_url + + def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder: + return SSEDecoder() + + def _build_request( + self, + options: FinalRequestOptions, + *, + retries_taken: int = 0, + ) -> httpx.Request: + if log.isEnabledFor(logging.DEBUG): + log.debug("Request options: %s", model_dump(options, exclude_unset=True)) + + kwargs: dict[str, Any] = {} + + json_data = options.json_data + if options.extra_json is not None: + if json_data is None: + json_data = cast(Body, options.extra_json) + elif is_mapping(json_data): + json_data = _merge_mappings(json_data, options.extra_json) + else: + raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") + + headers = self._build_headers(options, retries_taken=retries_taken) + params = _merge_mappings(self.default_query, options.params) + content_type = headers.get("Content-Type") + files = options.files + + # If the given Content-Type header is multipart/form-data then it + # has to be removed so that httpx can generate the header with + # additional information for us as it has to be in this form + # for the server to be able to correctly parse the request: + # multipart/form-data; boundary=---abc-- + if content_type is not None and content_type.startswith("multipart/form-data"): + if "boundary" not in content_type: + # only remove the header if the boundary hasn't been explicitly set + # as the caller doesn't want httpx to come up with their own boundary + headers.pop("Content-Type") + + # As we are now sending multipart/form-data instead of application/json + # we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding + if json_data: + if not is_dict(json_data): + raise TypeError( + f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead." + ) + kwargs["data"] = self._serialize_multipartform(json_data) + + # httpx determines whether or not to send a "multipart/form-data" + # request based on the truthiness of the "files" argument. + # This gets around that issue by generating a dict value that + # evaluates to true. 
+ # + # https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186 + if not files: + files = cast(HttpxRequestFiles, ForceMultipartDict()) + + prepared_url = self._prepare_url(options.url) + if "_" in prepared_url.host: + # work around https://github.com/encode/httpx/discussions/2880 + kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + + # TODO: report this error to httpx + return self._client.build_request( # pyright: ignore[reportUnknownMemberType] + headers=headers, + timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, + method=options.method, + url=prepared_url, + # the `Query` type that we use is incompatible with qs' + # `Params` type as it needs to be typed as `Mapping[str, object]` + # so that passing a `TypedDict` doesn't cause an error. + # https://github.com/microsoft/pyright/issues/3526#event-6715453066 + params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, + json=json_data if is_given(json_data) else None, + files=files, + **kwargs, + ) + + def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: + items = self.qs.stringify_items( + # TODO: type ignore is required as stringify_items is well typed but we can't be + # well typed without heavy validation. + data, # type: ignore + array_format="brackets", + ) + serialized: dict[str, object] = {} + for key, value in items: + existing = serialized.get(key) + + if not existing: + serialized[key] = value + continue + + # If a value has already been set for this key then that + # means we're sending data like `array[]=[1, 2, 3]` and we + # need to tell httpx that we want to send multiple values with + # the same key which is done by using a list or a tuple. + # + # Note: 2d arrays should never result in the same key at both + # levels so it's safe to assume that if the value is a list, + # it was because we changed it to be a list. 
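+        # Worked example: `{"ids": [1, 2]}` stringifies (with the "brackets" array
+        # format above) to pairs like ("ids[]", "1"), ("ids[]", "2"), which this
+        # loop folds into {"ids[]": ["1", "2"]} so httpx re-emits the repeated field.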
+ if is_list(existing): + existing.append(value) + else: + serialized[key] = [existing, value] + + return serialized + + def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]: + if not is_given(options.headers): + return cast_to + + # make a copy of the headers so we don't mutate user-input + headers = dict(options.headers) + + # we internally support defining a temporary header to override the + # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response` + # see _response.py for implementation details + override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN) + if is_given(override_cast_to): + options.headers = headers + return cast(Type[ResponseT], override_cast_to) + + return cast_to + + def _should_stream_response_body(self, request: httpx.Request) -> bool: + return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return] + + def _process_response_data( + self, + *, + data: object, + cast_to: type[ResponseT], + response: httpx.Response, + ) -> ResponseT: + if data is None: + return cast(ResponseT, None) + + if cast_to is object: + return cast(ResponseT, data) + + try: + if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol): + return cast(ResponseT, cast_to.build(response=response, data=data)) + + if self._strict_response_validation: + return cast(ResponseT, validate_type(type_=cast_to, value=data)) + + return cast(ResponseT, construct_type(type_=cast_to, value=data)) + except pydantic.ValidationError as err: + raise APIResponseValidationError(response=response, body=data) from err + + @property + def qs(self) -> Querystring: + return Querystring() + + @property + def custom_auth(self) -> httpx.Auth | None: + return None + + @property + def auth_headers(self) -> dict[str, str]: + return {} + + @property + def default_headers(self) -> dict[str, str | Omit]: + return { + "Accept": "application/json", + "Content-Type": "application/json", + "User-Agent": self.user_agent, + **self.platform_headers(), + **self.auth_headers, + **self._custom_headers, + } + + @property + def default_query(self) -> dict[str, object]: + return { + **self._custom_query, + } + + def _validate_headers( + self, + headers: Headers, # noqa: ARG002 + custom_headers: Headers, # noqa: ARG002 + ) -> None: + """Validate the given default headers and custom headers. + + Does nothing by default. + """ + return + + @property + def user_agent(self) -> str: + return f"{self.__class__.__name__}/Python {self._version}" + + @property + def base_url(self) -> URL: + return self._base_url + + @base_url.setter + def base_url(self, url: URL | str) -> None: + self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url)) + + def platform_headers(self) -> Dict[str, str]: + # the actual implementation is in a separate `lru_cache` decorated + # function because adding `lru_cache` to methods will leak memory + # https://github.com/python/cpython/issues/88476 + return platform_headers(self._version, platform=self._platform) + + def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None: + """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. 
+ + About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After + See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax + """ + if response_headers is None: + return None + + # First, try the non-standard `retry-after-ms` header for milliseconds, + # which is more precise than integer-seconds `retry-after` + try: + retry_ms_header = response_headers.get("retry-after-ms", None) + return float(retry_ms_header) / 1000 + except (TypeError, ValueError): + pass + + # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats). + retry_header = response_headers.get("retry-after") + try: + # note: the spec indicates that this should only ever be an integer + # but if someone sends a float there's no reason for us to not respect it + return float(retry_header) + except (TypeError, ValueError): + pass + + # Last, try parsing `retry-after` as a date. + retry_date_tuple = email.utils.parsedate_tz(retry_header) + if retry_date_tuple is None: + return None + + retry_date = email.utils.mktime_tz(retry_date_tuple) + return float(retry_date - time.time()) + + def _calculate_retry_timeout( + self, + remaining_retries: int, + options: FinalRequestOptions, + response_headers: Optional[httpx.Headers] = None, + ) -> float: + max_retries = options.get_max_retries(self.max_retries) + + # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. + retry_after = self._parse_retry_after_header(response_headers) + if retry_after is not None and 0 < retry_after <= 60: + return retry_after + + # Also cap retry count to 1000 to avoid any potential overflows with `pow` + nb_retries = min(max_retries - remaining_retries, 1000) + + # Apply exponential backoff, but not more than the max. + sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) + + # Apply some jitter, plus-or-minus half a second. + jitter = 1 - 0.25 * random() + timeout = sleep_seconds * jitter + return timeout if timeout >= 0 else 0 + + def _should_retry(self, response: httpx.Response) -> bool: + # Note: this is not a standard header + should_retry_header = response.headers.get("x-should-retry") + + # If the server explicitly says whether or not to retry, obey. + if should_retry_header == "true": + log.debug("Retrying as header `x-should-retry` is set to `true`") + return True + if should_retry_header == "false": + log.debug("Not retrying as header `x-should-retry` is set to `false`") + return False + + # Retry on request timeouts. + if response.status_code == 408: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on lock timeouts. + if response.status_code == 409: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry on rate limits. + if response.status_code == 429: + log.debug("Retrying due to status code %i", response.status_code) + return True + + # Retry internal errors. 
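+        # (When a retry *is* scheduled, the sleep comes from `_calculate_retry_timeout`
+        # above: a server-supplied `retry-after-ms`/`Retry-After` of at most 60 seconds
+        # is honored as-is; otherwise the delay is INITIAL_RETRY_DELAY * 2**n, capped
+        # at MAX_RETRY_DELAY and scaled by a jitter factor drawn from (0.75, 1.0].)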
+ if response.status_code >= 500: + log.debug("Retrying due to status code %i", response.status_code) + return True + + log.debug("Not retrying") + return False + + def _idempotency_key(self) -> str: + return f"stainless-python-retry-{uuid.uuid4()}" + + +class _DefaultHttpxClient(httpx.Client): + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + super().__init__(**kwargs) + + +if TYPE_CHECKING: + DefaultHttpxClient = httpx.Client + """An alias to `httpx.Client` that provides the same defaults that this SDK + uses internally. + + This is useful because overriding the `http_client` with your own instance of + `httpx.Client` will result in httpx's defaults being used, not ours. + """ +else: + DefaultHttpxClient = _DefaultHttpxClient + + +class SyncHttpxClientWrapper(DefaultHttpxClient): + def __del__(self) -> None: + if self.is_closed: + return + + try: + self.close() + except Exception: + pass + + +class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]): + _client: httpx.Client + _default_stream_cls: type[Stream[Any]] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + http_client: httpx.Client | None = None, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + _strict_response_validation: bool, + ) -> None: + if not is_given(timeout): + # if the user passed in a custom http client with a non-default + # timeout set then we use that timeout. + # + # note: there is an edge case here where the user passes in a client + # where they've explicitly set the timeout to match the default timeout + # as this check is structural, meaning that we'll think they didn't + # pass in a timeout and will ignore it + if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: + timeout = http_client.timeout + else: + timeout = DEFAULT_TIMEOUT + + if http_client is not None and not isinstance(http_client, httpx.Client): # pyright: ignore[reportUnnecessaryIsInstance] + raise TypeError( + f"Invalid `http_client` argument; Expected an instance of `httpx.Client` but got {type(http_client)}" + ) + + super().__init__( + version=version, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + base_url=base_url, + max_retries=max_retries, + custom_query=custom_query, + custom_headers=custom_headers, + _strict_response_validation=_strict_response_validation, + ) + self._client = http_client or SyncHttpxClientWrapper( + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + ) + + def is_closed(self) -> bool: + return self._client.is_closed + + def close(self) -> None: + """Close the underlying HTTPX client. + + The client will *not* be usable after this. 
+ """ + # If an error is thrown while constructing a client, self._client + # may not be present + if hasattr(self, "_client"): + self._client.close() + + def __enter__(self: _T) -> _T: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def _prepare_options( + self, + options: FinalRequestOptions, # noqa: ARG002 + ) -> FinalRequestOptions: + """Hook for mutating the given options""" + return options + + def _prepare_request( + self, + request: httpx.Request, # noqa: ARG002 + ) -> None: + """This method is used as a callback for mutating the `Request` object + after it has been constructed. + This is useful for cases where you want to add certain headers based off of + the request properties, e.g. `url`, `method` etc. + """ + return None + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[True], + stream_cls: Type[_StreamT], + ) -> _StreamT: ... + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: Type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... + + def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + cast_to = self._maybe_override_cast_to(cast_to, options) + + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + if input_options.idempotency_key is None and input_options.method.lower() != "get": + # ensure the idempotency key is reused between requests + input_options.idempotency_key = self._idempotency_key() + + response: httpx.Response | None = None + max_retries = input_options.get_max_retries(self.max_retries) + + retries_taken = 0 + for retries_taken in range(max_retries + 1): + options = model_copy(input_options) + options = self._prepare_options(options) + + remaining_retries = max_retries - retries_taken + request = self._build_request(options, retries_taken=retries_taken) + self._prepare_request(request) + + kwargs: HttpxSendArgs = {} + if self.custom_auth is not None: + kwargs["auth"] = self.custom_auth + + log.debug("Sending HTTP Request: %s %s", request.method, request.url) + + response = None + try: + response = self._client.send( + request, + stream=stream or self._should_stream_response_body(request=request), + **kwargs, + ) + except httpx.TimeoutException as err: + log.debug("Encountered httpx.TimeoutException", exc_info=True) + + if remaining_retries > 0: + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising timeout error") + raise APITimeoutError(request=request) from err + except Exception as err: + log.debug("Encountered Exception", exc_info=True) + + if remaining_retries > 0: + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising connection error") + raise APIConnectionError(request=request) from err + + log.debug( + 'HTTP Response: %s %s "%i %s" 
%s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + response.headers, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code + log.debug("Encountered httpx.HTTPStatusError", exc_info=True) + + if remaining_retries > 0 and self._should_retry(err.response): + err.response.close() + self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=response, + ) + continue + + # If the response is streamed then we need to explicitly read the response + # to completion before attempting to access the response text. + if not err.response.is_closed: + err.response.read() + + log.debug("Re-raising status error") + raise self._make_status_error_from_response(err.response) from None + + break + + assert response is not None, "could not resolve response (should never happen)" + return self._process_response( + cast_to=cast_to, + options=options, + response=response, + stream=stream, + stream_cls=stream_cls, + retries_taken=retries_taken, + ) + + def _sleep_for_retry( + self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None + ) -> None: + remaining_retries = max_retries - retries_taken + if remaining_retries == 1: + log.debug("1 retry left") + else: + log.debug("%i retries left", remaining_retries) + + timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None) + log.info("Retrying request to %s in %f seconds", options.url, timeout) + + time.sleep(timeout) + + def _process_response( + self, + *, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + response: httpx.Response, + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, + ) -> ResponseT: + origin = get_origin(cast_to) or cast_to + + if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if not issubclass(origin, APIResponse): + raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") + + response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) + return cast( + ResponseT, + response_cls( + raw=response, + client=self, + cast_to=extract_response_type(response_cls), + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ), + ) + + if cast_to == httpx.Response: + return cast(ResponseT, response) + + api_response = APIResponse( + raw=response, + client=self, + cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ) + if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): + return cast(ResponseT, api_response) + + return api_response.parse() + + def _request_api_list( + self, + model: Type[object], + page: Type[SyncPageT], + options: FinalRequestOptions, + ) -> SyncPageT: + def _parser(resp: SyncPageT) -> SyncPageT: + resp._set_private_attributes( + client=self, + model=model, + options=options, + ) + return resp + + options.post_parser = _parser + + return self.request(page, options, stream=False) + + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... 
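+    # These overloads give callers a precise return type depending on `stream`:
+    # a plain call returns the parsed model, while `stream=True` returns the given
+    # `stream_cls`. Sketch of a non-streaming call (path and cast target hypothetical):
+    #
+    #     models = client.get("/models", cast_to=ModelsPage, options={})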
+ + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_StreamT], + ) -> _StreamT: ... + + @overload + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... + + def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + opts = FinalRequestOptions.construct(method="get", url=path, **options) + # cast is required because mypy complains about returning Any even though + # it understands the type variables + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: Literal[True], + stream_cls: type[_StreamT], + ) -> _StreamT: ... + + @overload + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: bool, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: ... + + def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + files: RequestFiles | None = None, + stream: bool = False, + stream_cls: type[_StreamT] | None = None, + ) -> ResponseT | _StreamT: + opts = FinalRequestOptions.construct( + method="post", url=path, json_data=body, files=to_httpx_files(files), **options + ) + return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) + + def patch( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + return self.request(cast_to, opts) + + def put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct( + method="put", url=path, json_data=body, files=to_httpx_files(files), **options + ) + return self.request(cast_to, opts) + + def delete( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + return self.request(cast_to, opts) + + def get_api_list( + self, + path: str, + *, + model: Type[object], + page: Type[SyncPageT], + body: Body | None = None, + options: RequestOptions = {}, + method: str = "get", + ) -> SyncPageT: + opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) + return self._request_api_list(model, page, opts) + + +class _DefaultAsyncHttpxClient(httpx.AsyncClient): + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + 
kwargs.setdefault("follow_redirects", True) + super().__init__(**kwargs) + + +if TYPE_CHECKING: + DefaultAsyncHttpxClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK + uses internally. + + This is useful because overriding the `http_client` with your own instance of + `httpx.AsyncClient` will result in httpx's defaults being used, not ours. + """ +else: + DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + + +class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): + def __del__(self) -> None: + if self.is_closed: + return + + try: + # TODO(someday): support non asyncio runtimes here + asyncio.get_running_loop().create_task(self.aclose()) + except Exception: + pass + + +class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]): + _client: httpx.AsyncClient + _default_stream_cls: type[AsyncStream[Any]] | None = None + + def __init__( + self, + *, + version: str, + base_url: str | URL, + _strict_response_validation: bool, + max_retries: int = DEFAULT_MAX_RETRIES, + timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + http_client: httpx.AsyncClient | None = None, + custom_headers: Mapping[str, str] | None = None, + custom_query: Mapping[str, object] | None = None, + ) -> None: + if not is_given(timeout): + # if the user passed in a custom http client with a non-default + # timeout set then we use that timeout. + # + # note: there is an edge case here where the user passes in a client + # where they've explicitly set the timeout to match the default timeout + # as this check is structural, meaning that we'll think they didn't + # pass in a timeout and will ignore it + if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: + timeout = http_client.timeout + else: + timeout = DEFAULT_TIMEOUT + + if http_client is not None and not isinstance(http_client, httpx.AsyncClient): # pyright: ignore[reportUnnecessaryIsInstance] + raise TypeError( + f"Invalid `http_client` argument; Expected an instance of `httpx.AsyncClient` but got {type(http_client)}" + ) + + super().__init__( + version=version, + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + max_retries=max_retries, + custom_query=custom_query, + custom_headers=custom_headers, + _strict_response_validation=_strict_response_validation, + ) + self._client = http_client or AsyncHttpxClientWrapper( + base_url=base_url, + # cast to a valid type because mypy doesn't understand our type narrowing + timeout=cast(Timeout, timeout), + ) + + def is_closed(self) -> bool: + return self._client.is_closed + + async def close(self) -> None: + """Close the underlying HTTPX client. + + The client will *not* be usable after this. + """ + await self._client.aclose() + + async def __aenter__(self: _T) -> _T: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.close() + + async def _prepare_options( + self, + options: FinalRequestOptions, # noqa: ARG002 + ) -> FinalRequestOptions: + """Hook for mutating the given options""" + return options + + async def _prepare_request( + self, + request: httpx.Request, # noqa: ARG002 + ) -> None: + """This method is used as a callback for mutating the `Request` object + after it has been constructed. + This is useful for cases where you want to add certain headers based off of + the request properties, e.g. `url`, `method` etc. 
+ """ + return None + + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... + + async def request( + self, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + *, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + if self._platform is None: + # `get_platform` can make blocking IO calls so we + # execute it earlier while we are in an async context + self._platform = await asyncify(get_platform)() + + cast_to = self._maybe_override_cast_to(cast_to, options) + + # create a copy of the options we were given so that if the + # options are mutated later & we then retry, the retries are + # given the original options + input_options = model_copy(options) + if input_options.idempotency_key is None and input_options.method.lower() != "get": + # ensure the idempotency key is reused between requests + input_options.idempotency_key = self._idempotency_key() + + response: httpx.Response | None = None + max_retries = input_options.get_max_retries(self.max_retries) + + retries_taken = 0 + for retries_taken in range(max_retries + 1): + options = model_copy(input_options) + options = await self._prepare_options(options) + + remaining_retries = max_retries - retries_taken + request = self._build_request(options, retries_taken=retries_taken) + await self._prepare_request(request) + + kwargs: HttpxSendArgs = {} + if self.custom_auth is not None: + kwargs["auth"] = self.custom_auth + + log.debug("Sending HTTP Request: %s %s", request.method, request.url) + + response = None + try: + response = await self._client.send( + request, + stream=stream or self._should_stream_response_body(request=request), + **kwargs, + ) + except httpx.TimeoutException as err: + log.debug("Encountered httpx.TimeoutException", exc_info=True) + + if remaining_retries > 0: + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising timeout error") + raise APITimeoutError(request=request) from err + except Exception as err: + log.debug("Encountered Exception", exc_info=True) + + if remaining_retries > 0: + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=None, + ) + continue + + log.debug("Raising connection error") + raise APIConnectionError(request=request) from err + + log.debug( + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + response.headers, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code + log.debug("Encountered httpx.HTTPStatusError", exc_info=True) + + if remaining_retries > 0 and self._should_retry(err.response): + await err.response.aclose() + await self._sleep_for_retry( + retries_taken=retries_taken, + max_retries=max_retries, + options=input_options, + response=response, + ) + continue + + # If the response is streamed then we need to 
explicitly read the response + # to completion before attempting to access the response text. + if not err.response.is_closed: + await err.response.aread() + + log.debug("Re-raising status error") + raise self._make_status_error_from_response(err.response) from None + + break + + assert response is not None, "could not resolve response (should never happen)" + return await self._process_response( + cast_to=cast_to, + options=options, + response=response, + stream=stream, + stream_cls=stream_cls, + retries_taken=retries_taken, + ) + + async def _sleep_for_retry( + self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None + ) -> None: + remaining_retries = max_retries - retries_taken + if remaining_retries == 1: + log.debug("1 retry left") + else: + log.debug("%i retries left", remaining_retries) + + timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None) + log.info("Retrying request to %s in %f seconds", options.url, timeout) + + await anyio.sleep(timeout) + + async def _process_response( + self, + *, + cast_to: Type[ResponseT], + options: FinalRequestOptions, + response: httpx.Response, + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + retries_taken: int = 0, + ) -> ResponseT: + origin = get_origin(cast_to) or cast_to + + if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if not issubclass(origin, AsyncAPIResponse): + raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") + + response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) + return cast( + "ResponseT", + response_cls( + raw=response, + client=self, + cast_to=extract_response_type(response_cls), + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ), + ) + + if cast_to == httpx.Response: + return cast(ResponseT, response) + + api_response = AsyncAPIResponse( + raw=response, + client=self, + cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] + stream=stream, + stream_cls=stream_cls, + options=options, + retries_taken=retries_taken, + ) + if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): + return cast(ResponseT, api_response) + + return await api_response.parse() + + def _request_api_list( + self, + model: Type[_T], + page: Type[AsyncPageT], + options: FinalRequestOptions, + ) -> AsyncPaginator[_T, AsyncPageT]: + return AsyncPaginator(client=self, options=options, page_cls=page, model=model) + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... 
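+    # The async helpers mirror the sync client one-for-one; e.g. a streaming
+    # request would look roughly like this (names hypothetical):
+    #
+    #     stream = await client.post(
+    #         "/chat/completions",
+    #         cast_to=Completion,
+    #         body=params,
+    #         stream=True,
+    #         stream_cls=AsyncStream[CompletionChunk],
+    #     )
+    #     async for chunk in stream:
+    #         ...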
+ + async def get( + self, + path: str, + *, + cast_to: Type[ResponseT], + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + opts = FinalRequestOptions.construct(method="get", url=path, **options) + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: Literal[False] = False, + ) -> ResponseT: ... + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: Literal[True], + stream_cls: type[_AsyncStreamT], + ) -> _AsyncStreamT: ... + + @overload + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: bool, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: ... + + async def post( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + stream: bool = False, + stream_cls: type[_AsyncStreamT] | None = None, + ) -> ResponseT | _AsyncStreamT: + opts = FinalRequestOptions.construct( + method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options + ) + return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) + + async def patch( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + return await self.request(cast_to, opts) + + async def put( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + files: RequestFiles | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct( + method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options + ) + return await self.request(cast_to, opts) + + async def delete( + self, + path: str, + *, + cast_to: Type[ResponseT], + body: Body | None = None, + options: RequestOptions = {}, + ) -> ResponseT: + opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) + return await self.request(cast_to, opts) + + def get_api_list( + self, + path: str, + *, + model: Type[_T], + page: Type[AsyncPageT], + body: Body | None = None, + options: RequestOptions = {}, + method: str = "get", + ) -> AsyncPaginator[_T, AsyncPageT]: + opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) + return self._request_api_list(model, page, opts) + + +def make_request_options( + *, + query: Query | None = None, + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + idempotency_key: str | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + post_parser: PostParser | NotGiven = NOT_GIVEN, +) -> RequestOptions: + """Create a dict of type RequestOptions without keys of NotGiven values.""" + options: RequestOptions = {} + if extra_headers is not None: + options["headers"] = extra_headers + + if extra_body is not None: + options["extra_json"] = 
cast(AnyMapping, extra_body) + + if query is not None: + options["params"] = query + + if extra_query is not None: + options["params"] = {**options.get("params", {}), **extra_query} + + if not isinstance(timeout, NotGiven): + options["timeout"] = timeout + + if idempotency_key is not None: + options["idempotency_key"] = idempotency_key + + if is_given(post_parser): + # internal + options["post_parser"] = post_parser # type: ignore + + return options + + +class ForceMultipartDict(Dict[str, None]): + def __bool__(self) -> bool: + return True + + +class OtherPlatform: + def __init__(self, name: str) -> None: + self.name = name + + @override + def __str__(self) -> str: + return f"Other:{self.name}" + + +Platform = Union[ + OtherPlatform, + Literal[ + "MacOS", + "Linux", + "Windows", + "FreeBSD", + "OpenBSD", + "iOS", + "Android", + "Unknown", + ], +] + + +def get_platform() -> Platform: + try: + system = platform.system().lower() + platform_name = platform.platform().lower() + except Exception: + return "Unknown" + + if "iphone" in platform_name or "ipad" in platform_name: + # Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7 + # system is Darwin and platform_name is a string like: + # - Darwin-21.6.0-iPhone12,1-64bit + # - Darwin-21.6.0-iPad7,11-64bit + return "iOS" + + if system == "darwin": + return "MacOS" + + if system == "windows": + return "Windows" + + if "android" in platform_name: + # Tested using Pydroid 3 + # system is Linux and platform_name is a string like 'Linux-5.10.81-android12-9-00001-geba40aecb3b7-ab8534902-aarch64-with-libc' + return "Android" + + if system == "linux": + # https://distro.readthedocs.io/en/latest/#distro.id + distro_id = distro.id() + if distro_id == "freebsd": + return "FreeBSD" + + if distro_id == "openbsd": + return "OpenBSD" + + return "Linux" + + if platform_name: + return OtherPlatform(platform_name) + + return "Unknown" + + +@lru_cache(maxsize=None) +def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]: + return { + "X-Stainless-Lang": "python", + "X-Stainless-Package-Version": version, + "X-Stainless-OS": str(platform or get_platform()), + "X-Stainless-Arch": str(get_architecture()), + "X-Stainless-Runtime": get_python_runtime(), + "X-Stainless-Runtime-Version": get_python_version(), + } + + +class OtherArch: + def __init__(self, name: str) -> None: + self.name = name + + @override + def __str__(self) -> str: + return f"other:{self.name}" + + +Arch = Union[OtherArch, Literal["x32", "x64", "arm", "arm64", "unknown"]] + + +def get_python_runtime() -> str: + try: + return platform.python_implementation() + except Exception: + return "unknown" + + +def get_python_version() -> str: + try: + return platform.python_version() + except Exception: + return "unknown" + + +def get_architecture() -> Arch: + try: + machine = platform.machine().lower() + except Exception: + return "unknown" + + if machine in ("arm64", "aarch64"): + return "arm64" + + # TODO: untested + if machine == "arm": + return "arm" + + if machine == "x86_64": + return "x64" + + # TODO: untested + if sys.maxsize <= 2**32: + return "x32" + + if machine: + return OtherArch(machine) + + return "unknown" + + +def _merge_mappings( + obj1: Mapping[_T_co, Union[_T, Omit]], + obj2: Mapping[_T_co, Union[_T, Omit]], +) -> Dict[_T_co, _T]: + """Merge two mappings of the same type, removing any values that are instances of `Omit`. + + In cases with duplicate keys the second mapping takes precedence. 
+ """ + merged = {**obj1, **obj2} + return {key: value for key, value in merged.items() if not isinstance(value, Omit)} diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py new file mode 100644 index 00000000..99580b5e --- /dev/null +++ b/src/digitalocean_genai_sdk/_client.py @@ -0,0 +1,549 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, Union, Mapping +from typing_extensions import Self, override + +import httpx + +from . import _exceptions +from ._qs import Querystring +from ._types import ( + NOT_GIVEN, + Omit, + Timeout, + NotGiven, + Transport, + ProxiesTypes, + RequestOptions, +) +from ._utils import is_given, get_async_library +from ._version import __version__ +from .resources import ( + audio, + files, + images, + models, + batches, + uploads, + realtime, + responses, + assistants, + embeddings, + completions, + moderations, +) +from ._streaming import Stream as Stream, AsyncStream as AsyncStream +from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError +from ._base_client import ( + DEFAULT_MAX_RETRIES, + SyncAPIClient, + AsyncAPIClient, +) +from .resources.chat import chat +from .resources.threads import threads +from .resources.fine_tuning import fine_tuning +from .resources.organization import organization +from .resources.vector_stores import vector_stores + +__all__ = [ + "Timeout", + "Transport", + "ProxiesTypes", + "RequestOptions", + "DigitaloceanGenaiSDK", + "AsyncDigitaloceanGenaiSDK", + "Client", + "AsyncClient", +] + + +class DigitaloceanGenaiSDK(SyncAPIClient): + assistants: assistants.AssistantsResource + audio: audio.AudioResource + batches: batches.BatchesResource + chat: chat.ChatResource + completions: completions.CompletionsResource + embeddings: embeddings.EmbeddingsResource + files: files.FilesResource + fine_tuning: fine_tuning.FineTuningResource + images: images.ImagesResource + models: models.ModelsResource + moderations: moderations.ModerationsResource + organization: organization.OrganizationResource + realtime: realtime.RealtimeResource + responses: responses.ResponsesResource + threads: threads.ThreadsResource + uploads: uploads.UploadsResource + vector_stores: vector_stores.VectorStoresResource + with_raw_response: DigitaloceanGenaiSDKWithRawResponse + with_streaming_response: DigitaloceanGenaiSDKWithStreamedResponse + + # client options + api_key: str + + def __init__( + self, + *, + api_key: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + max_retries: int = DEFAULT_MAX_RETRIES, + default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + # Configure a custom httpx client. + # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`. + # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details. + http_client: httpx.Client | None = None, + # Enable or disable schema validation for data returned by the API. + # When enabled an error APIResponseValidationError is raised + # if the API responds with invalid data for the expected schema. + # + # This parameter may be removed or changed in the future. 
+ # If you rely on this feature, please open a GitHub issue + # outlining your use-case to help us decide if it should be + # part of our public interface in the future. + _strict_response_validation: bool = False, + ) -> None: + """Construct a new synchronous DigitaloceanGenaiSDK client instance. + + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + """ + if api_key is None: + api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + if api_key is None: + raise DigitaloceanGenaiSDKError( + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + ) + self.api_key = api_key + + if base_url is None: + base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + if base_url is None: + base_url = f"https://api.example.com" + + super().__init__( + version=__version__, + base_url=base_url, + max_retries=max_retries, + timeout=timeout, + http_client=http_client, + custom_headers=default_headers, + custom_query=default_query, + _strict_response_validation=_strict_response_validation, + ) + + self.assistants = assistants.AssistantsResource(self) + self.audio = audio.AudioResource(self) + self.batches = batches.BatchesResource(self) + self.chat = chat.ChatResource(self) + self.completions = completions.CompletionsResource(self) + self.embeddings = embeddings.EmbeddingsResource(self) + self.files = files.FilesResource(self) + self.fine_tuning = fine_tuning.FineTuningResource(self) + self.images = images.ImagesResource(self) + self.models = models.ModelsResource(self) + self.moderations = moderations.ModerationsResource(self) + self.organization = organization.OrganizationResource(self) + self.realtime = realtime.RealtimeResource(self) + self.responses = responses.ResponsesResource(self) + self.threads = threads.ThreadsResource(self) + self.uploads = uploads.UploadsResource(self) + self.vector_stores = vector_stores.VectorStoresResource(self) + self.with_raw_response = DigitaloceanGenaiSDKWithRawResponse(self) + self.with_streaming_response = DigitaloceanGenaiSDKWithStreamedResponse(self) + + @property + @override + def qs(self) -> Querystring: + return Querystring(array_format="comma") + + @property + @override + def auth_headers(self) -> dict[str, str]: + api_key = self.api_key + return {"Authorization": f"Bearer {api_key}"} + + @property + @override + def default_headers(self) -> dict[str, str | Omit]: + return { + **super().default_headers, + "X-Stainless-Async": "false", + **self._custom_headers, + } + + def copy( + self, + *, + api_key: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + http_client: httpx.Client | None = None, + max_retries: int | NotGiven = NOT_GIVEN, + default_headers: Mapping[str, str] | None = None, + set_default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + set_default_query: Mapping[str, object] | None = None, + _extra_kwargs: Mapping[str, Any] = {}, + ) -> Self: + """ + Create a new client instance re-using the same options given to the current client with optional overriding. 
+ """ + if default_headers is not None and set_default_headers is not None: + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + + if default_query is not None and set_default_query is not None: + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + + headers = self._custom_headers + if default_headers is not None: + headers = {**headers, **default_headers} + elif set_default_headers is not None: + headers = set_default_headers + + params = self._custom_query + if default_query is not None: + params = {**params, **default_query} + elif set_default_query is not None: + params = set_default_query + + http_client = http_client or self._client + return self.__class__( + api_key=api_key or self.api_key, + base_url=base_url or self.base_url, + timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, + http_client=http_client, + max_retries=max_retries if is_given(max_retries) else self.max_retries, + default_headers=headers, + default_query=params, + **_extra_kwargs, + ) + + # Alias for `copy` for nicer inline usage, e.g. + # client.with_options(timeout=10).foo.create(...) + with_options = copy + + @override + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> APIStatusError: + if response.status_code == 400: + return _exceptions.BadRequestError(err_msg, response=response, body=body) + + if response.status_code == 401: + return _exceptions.AuthenticationError(err_msg, response=response, body=body) + + if response.status_code == 403: + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + + if response.status_code == 404: + return _exceptions.NotFoundError(err_msg, response=response, body=body) + + if response.status_code == 409: + return _exceptions.ConflictError(err_msg, response=response, body=body) + + if response.status_code == 422: + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + + if response.status_code == 429: + return _exceptions.RateLimitError(err_msg, response=response, body=body) + + if response.status_code >= 500: + return _exceptions.InternalServerError(err_msg, response=response, body=body) + return APIStatusError(err_msg, response=response, body=body) + + +class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): + assistants: assistants.AsyncAssistantsResource + audio: audio.AsyncAudioResource + batches: batches.AsyncBatchesResource + chat: chat.AsyncChatResource + completions: completions.AsyncCompletionsResource + embeddings: embeddings.AsyncEmbeddingsResource + files: files.AsyncFilesResource + fine_tuning: fine_tuning.AsyncFineTuningResource + images: images.AsyncImagesResource + models: models.AsyncModelsResource + moderations: moderations.AsyncModerationsResource + organization: organization.AsyncOrganizationResource + realtime: realtime.AsyncRealtimeResource + responses: responses.AsyncResponsesResource + threads: threads.AsyncThreadsResource + uploads: uploads.AsyncUploadsResource + vector_stores: vector_stores.AsyncVectorStoresResource + with_raw_response: AsyncDigitaloceanGenaiSDKWithRawResponse + with_streaming_response: AsyncDigitaloceanGenaiSDKWithStreamedResponse + + # client options + api_key: str + + def __init__( + self, + *, + api_key: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + max_retries: int = DEFAULT_MAX_RETRIES, + default_headers: Mapping[str, str] | None = 
None, + default_query: Mapping[str, object] | None = None, + # Configure a custom httpx client. + # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`. + # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details. + http_client: httpx.AsyncClient | None = None, + # Enable or disable schema validation for data returned by the API. + # When enabled an error APIResponseValidationError is raised + # if the API responds with invalid data for the expected schema. + # + # This parameter may be removed or changed in the future. + # If you rely on this feature, please open a GitHub issue + # outlining your use-case to help us decide if it should be + # part of our public interface in the future. + _strict_response_validation: bool = False, + ) -> None: + """Construct a new async AsyncDigitaloceanGenaiSDK client instance. + + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + """ + if api_key is None: + api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + if api_key is None: + raise DigitaloceanGenaiSDKError( + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + ) + self.api_key = api_key + + if base_url is None: + base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + if base_url is None: + base_url = f"https://api.example.com" + + super().__init__( + version=__version__, + base_url=base_url, + max_retries=max_retries, + timeout=timeout, + http_client=http_client, + custom_headers=default_headers, + custom_query=default_query, + _strict_response_validation=_strict_response_validation, + ) + + self.assistants = assistants.AsyncAssistantsResource(self) + self.audio = audio.AsyncAudioResource(self) + self.batches = batches.AsyncBatchesResource(self) + self.chat = chat.AsyncChatResource(self) + self.completions = completions.AsyncCompletionsResource(self) + self.embeddings = embeddings.AsyncEmbeddingsResource(self) + self.files = files.AsyncFilesResource(self) + self.fine_tuning = fine_tuning.AsyncFineTuningResource(self) + self.images = images.AsyncImagesResource(self) + self.models = models.AsyncModelsResource(self) + self.moderations = moderations.AsyncModerationsResource(self) + self.organization = organization.AsyncOrganizationResource(self) + self.realtime = realtime.AsyncRealtimeResource(self) + self.responses = responses.AsyncResponsesResource(self) + self.threads = threads.AsyncThreadsResource(self) + self.uploads = uploads.AsyncUploadsResource(self) + self.vector_stores = vector_stores.AsyncVectorStoresResource(self) + self.with_raw_response = AsyncDigitaloceanGenaiSDKWithRawResponse(self) + self.with_streaming_response = AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) + + @property + @override + def qs(self) -> Querystring: + return Querystring(array_format="comma") + + @property + @override + def auth_headers(self) -> dict[str, str]: + api_key = self.api_key + return {"Authorization": f"Bearer {api_key}"} + + @property + @override + def default_headers(self) -> dict[str, str | Omit]: + return { + **super().default_headers, + "X-Stainless-Async": f"async:{get_async_library()}", + **self._custom_headers, + } + + def copy( + self, + *, + api_key: str | None = None, + base_url: str | httpx.URL | None = None, + timeout: float | Timeout | None | NotGiven = NOT_GIVEN, 
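+        # `NOT_GIVEN` (rather than `None`) means "not passed": e.g. (a sketch)
+        # `client.copy()` keeps the current timeout, while
+        # `client.copy(timeout=None)` explicitly disables it.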
+ http_client: httpx.AsyncClient | None = None, + max_retries: int | NotGiven = NOT_GIVEN, + default_headers: Mapping[str, str] | None = None, + set_default_headers: Mapping[str, str] | None = None, + default_query: Mapping[str, object] | None = None, + set_default_query: Mapping[str, object] | None = None, + _extra_kwargs: Mapping[str, Any] = {}, + ) -> Self: + """ + Create a new client instance re-using the same options given to the current client with optional overriding. + """ + if default_headers is not None and set_default_headers is not None: + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + + if default_query is not None and set_default_query is not None: + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + + headers = self._custom_headers + if default_headers is not None: + headers = {**headers, **default_headers} + elif set_default_headers is not None: + headers = set_default_headers + + params = self._custom_query + if default_query is not None: + params = {**params, **default_query} + elif set_default_query is not None: + params = set_default_query + + http_client = http_client or self._client + return self.__class__( + api_key=api_key or self.api_key, + base_url=base_url or self.base_url, + timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, + http_client=http_client, + max_retries=max_retries if is_given(max_retries) else self.max_retries, + default_headers=headers, + default_query=params, + **_extra_kwargs, + ) + + # Alias for `copy` for nicer inline usage, e.g. + # client.with_options(timeout=10).foo.create(...) + with_options = copy + + @override + def _make_status_error( + self, + err_msg: str, + *, + body: object, + response: httpx.Response, + ) -> APIStatusError: + if response.status_code == 400: + return _exceptions.BadRequestError(err_msg, response=response, body=body) + + if response.status_code == 401: + return _exceptions.AuthenticationError(err_msg, response=response, body=body) + + if response.status_code == 403: + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + + if response.status_code == 404: + return _exceptions.NotFoundError(err_msg, response=response, body=body) + + if response.status_code == 409: + return _exceptions.ConflictError(err_msg, response=response, body=body) + + if response.status_code == 422: + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + + if response.status_code == 429: + return _exceptions.RateLimitError(err_msg, response=response, body=body) + + if response.status_code >= 500: + return _exceptions.InternalServerError(err_msg, response=response, body=body) + return APIStatusError(err_msg, response=response, body=body) + + +class DigitaloceanGenaiSDKWithRawResponse: + def __init__(self, client: DigitaloceanGenaiSDK) -> None: + self.assistants = assistants.AssistantsResourceWithRawResponse(client.assistants) + self.audio = audio.AudioResourceWithRawResponse(client.audio) + self.batches = batches.BatchesResourceWithRawResponse(client.batches) + self.chat = chat.ChatResourceWithRawResponse(client.chat) + self.completions = completions.CompletionsResourceWithRawResponse(client.completions) + self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings) + self.files = files.FilesResourceWithRawResponse(client.files) + self.fine_tuning = fine_tuning.FineTuningResourceWithRawResponse(client.fine_tuning) + self.images = 
images.ImagesResourceWithRawResponse(client.images) + self.models = models.ModelsResourceWithRawResponse(client.models) + self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations) + self.organization = organization.OrganizationResourceWithRawResponse(client.organization) + self.realtime = realtime.RealtimeResourceWithRawResponse(client.realtime) + self.responses = responses.ResponsesResourceWithRawResponse(client.responses) + self.threads = threads.ThreadsResourceWithRawResponse(client.threads) + self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads) + self.vector_stores = vector_stores.VectorStoresResourceWithRawResponse(client.vector_stores) + + +class AsyncDigitaloceanGenaiSDKWithRawResponse: + def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + self.assistants = assistants.AsyncAssistantsResourceWithRawResponse(client.assistants) + self.audio = audio.AsyncAudioResourceWithRawResponse(client.audio) + self.batches = batches.AsyncBatchesResourceWithRawResponse(client.batches) + self.chat = chat.AsyncChatResourceWithRawResponse(client.chat) + self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions) + self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings) + self.files = files.AsyncFilesResourceWithRawResponse(client.files) + self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithRawResponse(client.fine_tuning) + self.images = images.AsyncImagesResourceWithRawResponse(client.images) + self.models = models.AsyncModelsResourceWithRawResponse(client.models) + self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations) + self.organization = organization.AsyncOrganizationResourceWithRawResponse(client.organization) + self.realtime = realtime.AsyncRealtimeResourceWithRawResponse(client.realtime) + self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses) + self.threads = threads.AsyncThreadsResourceWithRawResponse(client.threads) + self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads) + self.vector_stores = vector_stores.AsyncVectorStoresResourceWithRawResponse(client.vector_stores) + + +class DigitaloceanGenaiSDKWithStreamedResponse: + def __init__(self, client: DigitaloceanGenaiSDK) -> None: + self.assistants = assistants.AssistantsResourceWithStreamingResponse(client.assistants) + self.audio = audio.AudioResourceWithStreamingResponse(client.audio) + self.batches = batches.BatchesResourceWithStreamingResponse(client.batches) + self.chat = chat.ChatResourceWithStreamingResponse(client.chat) + self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions) + self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings) + self.files = files.FilesResourceWithStreamingResponse(client.files) + self.fine_tuning = fine_tuning.FineTuningResourceWithStreamingResponse(client.fine_tuning) + self.images = images.ImagesResourceWithStreamingResponse(client.images) + self.models = models.ModelsResourceWithStreamingResponse(client.models) + self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations) + self.organization = organization.OrganizationResourceWithStreamingResponse(client.organization) + self.realtime = realtime.RealtimeResourceWithStreamingResponse(client.realtime) + self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses) + self.threads = 
threads.ThreadsResourceWithStreamingResponse(client.threads) + self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads) + self.vector_stores = vector_stores.VectorStoresResourceWithStreamingResponse(client.vector_stores) + + +class AsyncDigitaloceanGenaiSDKWithStreamedResponse: + def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + self.assistants = assistants.AsyncAssistantsResourceWithStreamingResponse(client.assistants) + self.audio = audio.AsyncAudioResourceWithStreamingResponse(client.audio) + self.batches = batches.AsyncBatchesResourceWithStreamingResponse(client.batches) + self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat) + self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions) + self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings) + self.files = files.AsyncFilesResourceWithStreamingResponse(client.files) + self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithStreamingResponse(client.fine_tuning) + self.images = images.AsyncImagesResourceWithStreamingResponse(client.images) + self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) + self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations) + self.organization = organization.AsyncOrganizationResourceWithStreamingResponse(client.organization) + self.realtime = realtime.AsyncRealtimeResourceWithStreamingResponse(client.realtime) + self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses) + self.threads = threads.AsyncThreadsResourceWithStreamingResponse(client.threads) + self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads) + self.vector_stores = vector_stores.AsyncVectorStoresResourceWithStreamingResponse(client.vector_stores) + + +Client = DigitaloceanGenaiSDK + +AsyncClient = AsyncDigitaloceanGenaiSDK diff --git a/src/digitalocean_genai_sdk/_compat.py b/src/digitalocean_genai_sdk/_compat.py new file mode 100644 index 00000000..92d9ee61 --- /dev/null +++ b/src/digitalocean_genai_sdk/_compat.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload +from datetime import date, datetime +from typing_extensions import Self, Literal + +import pydantic +from pydantic.fields import FieldInfo + +from ._types import IncEx, StrBytesIntFloat + +_T = TypeVar("_T") +_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) + +# --------------- Pydantic v2 compatibility --------------- + +# Pyright incorrectly reports some of our functions as overriding a method when they don't +# pyright: reportIncompatibleMethodOverride=false + +PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + +# v1 re-exports +if TYPE_CHECKING: + + def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001 + ... + + def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001 + ... + + def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001 + ... + + def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001 + ... + + def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001 + ... + + def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001 + ... + + def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001 + ... 
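+
+    # These stubs mirror the pydantic v1 helpers re-exported at runtime in the
+    # `else` branch below; e.g. (a sketch) parse_datetime("2019-12-04T10:00:00Z")
+    # yields an aware datetime, and is_literal_type(Literal["a"]) is True.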
+ +else: + if PYDANTIC_V2: + from pydantic.v1.typing import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, + ) + from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + else: + from pydantic.typing import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, + ) + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + + +# refactored config +if TYPE_CHECKING: + from pydantic import ConfigDict as ConfigDict +else: + if PYDANTIC_V2: + from pydantic import ConfigDict + else: + # TODO: provide an error message here? + ConfigDict = None + + +# renamed methods / properties +def parse_obj(model: type[_ModelT], value: object) -> _ModelT: + if PYDANTIC_V2: + return model.model_validate(value) + else: + return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + + +def field_is_required(field: FieldInfo) -> bool: + if PYDANTIC_V2: + return field.is_required() + return field.required # type: ignore + + +def field_get_default(field: FieldInfo) -> Any: + value = field.get_default() + if PYDANTIC_V2: + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None + return value + return value + + +def field_outer_type(field: FieldInfo) -> Any: + if PYDANTIC_V2: + return field.annotation + return field.outer_type_ # type: ignore + + +def get_model_config(model: type[pydantic.BaseModel]) -> Any: + if PYDANTIC_V2: + return model.model_config + return model.__config__ # type: ignore + + +def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: + if PYDANTIC_V2: + return model.model_fields + return model.__fields__ # type: ignore + + +def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: + if PYDANTIC_V2: + return model.model_copy(deep=deep) + return model.copy(deep=deep) # type: ignore + + +def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: + if PYDANTIC_V2: + return model.model_dump_json(indent=indent) + return model.json(indent=indent) # type: ignore + + +def model_dump( + model: pydantic.BaseModel, + *, + exclude: IncEx | None = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + warnings: bool = True, + mode: Literal["json", "python"] = "python", +) -> dict[str, Any]: + if PYDANTIC_V2 or hasattr(model, "model_dump"): + return model.model_dump( + mode=mode, + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + # warnings are not supported in Pydantic v1 + warnings=warnings if PYDANTIC_V2 else True, + ) + return cast( + "dict[str, Any]", + model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + ), + ) + + +def model_parse(model: type[_ModelT], data: Any) -> _ModelT: + if PYDANTIC_V2: + return model.model_validate(data) + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + + +# generic models +if TYPE_CHECKING: + + class GenericModel(pydantic.BaseModel): ... + +else: + if PYDANTIC_V2: + # there no longer needs to be a distinction in v2 but + # we still have to create our own subclass to avoid + # inconsistent MRO ordering errors + class GenericModel(pydantic.BaseModel): ... 
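+
+        # e.g. (a sketch) downstream code can declare
+        #     class Page(GenericModel, Generic[_T]):
+        #         data: List[_T]
+        # and it behaves the same under either pydantic major version.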
+ + else: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... + + +# cached properties +if TYPE_CHECKING: + cached_property = property + + # we define a separate type (copied from typeshed) + # that represents that `cached_property` is `set`able + # at runtime, which differs from `@property`. + # + # this is a separate type as editors likely special case + # `@property` and we don't want to cause issues just to have + # more helpful internal types. + + class typed_cached_property(Generic[_T]): + func: Callable[[Any], _T] + attrname: str | None + + def __init__(self, func: Callable[[Any], _T]) -> None: ... + + @overload + def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... + + @overload + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... + + def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: + raise NotImplementedError() + + def __set_name__(self, owner: type[Any], name: str) -> None: ... + + # __set__ is not defined at runtime, but @cached_property is designed to be settable + def __set__(self, instance: object, value: _T) -> None: ... +else: + from functools import cached_property as cached_property + + typed_cached_property = cached_property diff --git a/src/digitalocean_genai_sdk/_constants.py b/src/digitalocean_genai_sdk/_constants.py new file mode 100644 index 00000000..6ddf2c71 --- /dev/null +++ b/src/digitalocean_genai_sdk/_constants.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import httpx + +RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" +OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" + +# default timeout is 1 minute +DEFAULT_TIMEOUT = httpx.Timeout(timeout=60, connect=5.0) +DEFAULT_MAX_RETRIES = 2 +DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20) + +INITIAL_RETRY_DELAY = 0.5 +MAX_RETRY_DELAY = 8.0 diff --git a/src/digitalocean_genai_sdk/_exceptions.py b/src/digitalocean_genai_sdk/_exceptions.py new file mode 100644 index 00000000..755e166e --- /dev/null +++ b/src/digitalocean_genai_sdk/_exceptions.py @@ -0,0 +1,108 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal + +import httpx + +__all__ = [ + "BadRequestError", + "AuthenticationError", + "PermissionDeniedError", + "NotFoundError", + "ConflictError", + "UnprocessableEntityError", + "RateLimitError", + "InternalServerError", +] + + +class DigitaloceanGenaiSDKError(Exception): + pass + + +class APIError(DigitaloceanGenaiSDKError): + message: str + request: httpx.Request + + body: object | None + """The API response body. + + If the API responded with a valid JSON structure then this property will be the + decoded result. + + If it isn't a valid JSON structure then this will be the raw response. + + If there was no response associated with this error then it will be `None`. 
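+
+    For example (an illustrative sketch; any API call would do):
+
+    ```py
+    try:
+        client.models.list()
+    except APIStatusError as err:
+        print(err.status_code, err.body)
+    ```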
+ """ + + def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None: # noqa: ARG002 + super().__init__(message) + self.request = request + self.message = message + self.body = body + + +class APIResponseValidationError(APIError): + response: httpx.Response + status_code: int + + def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None: + super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body) + self.response = response + self.status_code = response.status_code + + +class APIStatusError(APIError): + """Raised when an API response has a status code of 4xx or 5xx.""" + + response: httpx.Response + status_code: int + + def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None: + super().__init__(message, response.request, body=body) + self.response = response + self.status_code = response.status_code + + +class APIConnectionError(APIError): + def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: + super().__init__(message, request, body=None) + + +class APITimeoutError(APIConnectionError): + def __init__(self, request: httpx.Request) -> None: + super().__init__(message="Request timed out.", request=request) + + +class BadRequestError(APIStatusError): + status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride] + + +class AuthenticationError(APIStatusError): + status_code: Literal[401] = 401 # pyright: ignore[reportIncompatibleVariableOverride] + + +class PermissionDeniedError(APIStatusError): + status_code: Literal[403] = 403 # pyright: ignore[reportIncompatibleVariableOverride] + + +class NotFoundError(APIStatusError): + status_code: Literal[404] = 404 # pyright: ignore[reportIncompatibleVariableOverride] + + +class ConflictError(APIStatusError): + status_code: Literal[409] = 409 # pyright: ignore[reportIncompatibleVariableOverride] + + +class UnprocessableEntityError(APIStatusError): + status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride] + + +class RateLimitError(APIStatusError): + status_code: Literal[429] = 429 # pyright: ignore[reportIncompatibleVariableOverride] + + +class InternalServerError(APIStatusError): + pass diff --git a/src/digitalocean_genai_sdk/_files.py b/src/digitalocean_genai_sdk/_files.py new file mode 100644 index 00000000..df28b382 --- /dev/null +++ b/src/digitalocean_genai_sdk/_files.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import io +import os +import pathlib +from typing import overload +from typing_extensions import TypeGuard + +import anyio + +from ._types import ( + FileTypes, + FileContent, + RequestFiles, + HttpxFileTypes, + Base64FileInput, + HttpxFileContent, + HttpxRequestFiles, +) +from ._utils import is_tuple_t, is_mapping_t, is_sequence_t + + +def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]: + return isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike) + + +def is_file_content(obj: object) -> TypeGuard[FileContent]: + return ( + isinstance(obj, bytes) or isinstance(obj, tuple) or isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike) + ) + + +def assert_is_file_content(obj: object, *, key: str | None = None) -> None: + if not is_file_content(obj): + prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" + raise RuntimeError( + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received 
{type(obj)} instead. See https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main#file-uploads"
+        ) from None
+
+
+@overload
+def to_httpx_files(files: None) -> None: ...
+
+
+@overload
+def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
+
+
+def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
+    if files is None:
+        return None
+
+    if is_mapping_t(files):
+        files = {key: _transform_file(file) for key, file in files.items()}
+    elif is_sequence_t(files):
+        files = [(key, _transform_file(file)) for key, file in files]
+    else:
+        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
+
+    return files
+
+
+def _transform_file(file: FileTypes) -> HttpxFileTypes:
+    if is_file_content(file):
+        if isinstance(file, os.PathLike):
+            path = pathlib.Path(file)
+            return (path.name, path.read_bytes())
+
+        return file
+
+    if is_tuple_t(file):
+        return (file[0], _read_file_content(file[1]), *file[2:])
+
+    raise TypeError("Expected file input to be a FileContent type or a tuple")
+
+
+def _read_file_content(file: FileContent) -> HttpxFileContent:
+    if isinstance(file, os.PathLike):
+        return pathlib.Path(file).read_bytes()
+    return file
+
+
+@overload
+async def async_to_httpx_files(files: None) -> None: ...
+
+
+@overload
+async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
+
+
+async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
+    if files is None:
+        return None
+
+    if is_mapping_t(files):
+        files = {key: await _async_transform_file(file) for key, file in files.items()}
+    elif is_sequence_t(files):
+        files = [(key, await _async_transform_file(file)) for key, file in files]
+    else:
+        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
+
+    return files
+
+
+async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
+    if is_file_content(file):
+        if isinstance(file, os.PathLike):
+            path = anyio.Path(file)
+            return (path.name, await path.read_bytes())
+
+        return file
+
+    if is_tuple_t(file):
+        return (file[0], await _async_read_file_content(file[1]), *file[2:])
+
+    raise TypeError("Expected file input to be a FileContent type or a tuple")
+
+
+async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
+    if isinstance(file, os.PathLike):
+        return await anyio.Path(file).read_bytes()
+
+    return file
diff --git a/src/digitalocean_genai_sdk/_models.py b/src/digitalocean_genai_sdk/_models.py
new file mode 100644
index 00000000..798956f1
--- /dev/null
+++ b/src/digitalocean_genai_sdk/_models.py
@@ -0,0 +1,803 @@
+from __future__ import annotations
+
+import os
+import inspect
+from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+from datetime import date, datetime
+from typing_extensions import (
+    Unpack,
+    Literal,
+    ClassVar,
+    Protocol,
+    Required,
+    ParamSpec,
+    TypedDict,
+    TypeGuard,
+    final,
+    override,
+    runtime_checkable,
+)
+
+import pydantic
+from pydantic.fields import FieldInfo
+
+from ._types import (
+    Body,
+    IncEx,
+    Query,
+    ModelT,
+    Headers,
+    Timeout,
+    NotGiven,
+    AnyMapping,
+    HttpxRequestFiles,
+)
+from ._utils import (
+    PropertyInfo,
+    is_list,
+    is_given,
+    json_safe,
+    lru_cache,
+    is_mapping,
+    parse_date,
+    coerce_boolean,
+    parse_datetime,
+    strip_not_given,
+    extract_type_arg,
+    is_annotated_type,
+    is_type_alias_type,
+    strip_annotated_type,
+)
+from ._compat import (
+    PYDANTIC_V2,
+    ConfigDict,
+    GenericModel as
BaseGenericModel, + get_args, + is_union, + parse_obj, + get_origin, + is_literal_type, + get_model_config, + get_model_fields, + field_get_default, +) +from ._constants import RAW_RESPONSE_HEADER + +if TYPE_CHECKING: + from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema + +__all__ = ["BaseModel", "GenericModel"] + +_T = TypeVar("_T") +_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel") + +P = ParamSpec("P") + + +@runtime_checkable +class _ConfigProtocol(Protocol): + allow_population_by_field_name: bool + + +class BaseModel(pydantic.BaseModel): + if PYDANTIC_V2: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) + else: + + @property + @override + def model_fields_set(self) -> set[str]: + # a forwards-compat shim for pydantic v2 + return self.__fields_set__ # type: ignore + + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + extra: Any = pydantic.Extra.allow # type: ignore + + def to_dict( + self, + *, + mode: Literal["json", "python"] = "python", + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> dict[str, object]: + """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + mode: + If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`. + If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)` + + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that are set to their default value from the output. + exclude_none: Whether to exclude fields that have a value of `None` from the output. + warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2. + """ + return self.model_dump( + mode=mode, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + def to_json( + self, + *, + indent: int | None = 2, + use_api_names: bool = True, + exclude_unset: bool = True, + exclude_defaults: bool = False, + exclude_none: bool = False, + warnings: bool = True, + ) -> str: + """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). + + By default, fields that were not set by the API will not be included, + and keys will match the API response, *not* the property names from the model. + + For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, + the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). + + Args: + indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. 
Defaults to `2` + use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2. + """ + return self.model_dump_json( + indent=indent, + by_alias=use_api_names, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + warnings=warnings, + ) + + @override + def __str__(self) -> str: + # mypy complains about an invalid self arg + return f"{self.__repr_name__()}({self.__repr_str__(', ')})" # type: ignore[misc] + + # Override the 'construct' method in a way that supports recursive parsing without validation. + # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. + @classmethod + @override + def construct( # pyright: ignore[reportIncompatibleMethodOverride] + __cls: Type[ModelT], + _fields_set: set[str] | None = None, + **values: object, + ) -> ModelT: + m = __cls.__new__(__cls) + fields_values: dict[str, object] = {} + + config = get_model_config(__cls) + populate_by_name = ( + config.allow_population_by_field_name + if isinstance(config, _ConfigProtocol) + else config.get("populate_by_name") + ) + + if _fields_set is None: + _fields_set = set() + + model_fields = get_model_fields(__cls) + for name, field in model_fields.items(): + key = field.alias + if key is None or (key not in values and populate_by_name): + key = name + + if key in values: + fields_values[name] = _construct_field(value=values[key], field=field, key=key) + _fields_set.add(name) + else: + fields_values[name] = field_get_default(field) + + _extra = {} + for key, value in values.items(): + if key not in model_fields: + if PYDANTIC_V2: + _extra[key] = value + else: + _fields_set.add(key) + fields_values[key] = value + + object.__setattr__(m, "__dict__", fields_values) + + if PYDANTIC_V2: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) + object.__setattr__(m, "__pydantic_extra__", _extra) + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) + else: + # init_private_attributes() does not exist in v2 + m._init_private_attributes() # type: ignore + + # copied from Pydantic v1's `construct()` method + object.__setattr__(m, "__fields_set__", _fields_set) + + return m + + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + # because the type signatures are technically different + # although not in practice + model_construct = construct + + if not PYDANTIC_V2: + # we define aliases for some of the new pydantic v2 methods so + # that we can just document these methods without having to specify + # a specific pydantic version as some users may not know which + # pydantic version they are currently using + + @override + def model_dump( + self, + *, + mode: Literal["json", "python"] | str = "python", + include: IncEx | None = None, + exclude: IncEx | None = None, + by_alias: bool = False, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + context: dict[str, Any] | None = None, + serialize_as_any: bool = False, + ) -> dict[str, 
Any]: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump + + Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + Args: + mode: The mode in which `to_python` should run. + If mode is 'json', the dictionary will only contain JSON serializable types. + If mode is 'python', the dictionary may contain any Python objects. + include: A list of fields to include in the output. + exclude: A list of fields to exclude from the output. + by_alias: Whether to use the field's alias in the dictionary key if defined. + exclude_unset: Whether to exclude fields that are unset or None from the output. + exclude_defaults: Whether to exclude fields that are set to their default value from the output. + exclude_none: Whether to exclude fields that have a value of `None` from the output. + round_trip: Whether to enable serialization and deserialization round-trip support. + warnings: Whether to log warnings when invalid fields are encountered. + + Returns: + A dictionary representation of the model. + """ + if mode not in {"json", "python"}: + raise ValueError("mode must be either 'json' or 'python'") + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + dumped = super().dict( # pyright: ignore[reportDeprecated] + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + + @override + def model_dump_json( + self, + *, + indent: int | None = None, + include: IncEx | None = None, + exclude: IncEx | None = None, + by_alias: bool = False, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + round_trip: bool = False, + warnings: bool | Literal["none", "warn", "error"] = True, + context: dict[str, Any] | None = None, + serialize_as_any: bool = False, + ) -> str: + """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json + + Generates a JSON representation of the model using Pydantic's `to_json` method. + + Args: + indent: Indentation to use in the JSON output. If None is passed, the output will be compact. + include: Field(s) to include in the JSON output. Can take either a string or set of strings. + exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings. + by_alias: Whether to serialize using field aliases. + exclude_unset: Whether to exclude fields that have not been explicitly set. + exclude_defaults: Whether to exclude fields that have the default value. + exclude_none: Whether to exclude fields that have a value of `None`. + round_trip: Whether to use serialization/deserialization between JSON and class instance. + warnings: Whether to show any warnings that occurred during serialization. + + Returns: + A JSON string representation of the model. 
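+
+            For example (a sketch):
+
+            ```py
+            model.model_dump_json(indent=2, exclude_none=True)
+            ```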
+ """ + if round_trip != False: + raise ValueError("round_trip is only supported in Pydantic v2") + if warnings != True: + raise ValueError("warnings is only supported in Pydantic v2") + if context is not None: + raise ValueError("context is only supported in Pydantic v2") + if serialize_as_any != False: + raise ValueError("serialize_as_any is only supported in Pydantic v2") + return super().json( # type: ignore[reportDeprecated] + indent=indent, + include=include, + exclude=exclude, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + + +def _construct_field(value: object, field: FieldInfo, key: str) -> object: + if value is None: + return field_get_default(field) + + if PYDANTIC_V2: + type_ = field.annotation + else: + type_ = cast(type, field.outer_type_) # type: ignore + + if type_ is None: + raise RuntimeError(f"Unexpected field type is None for {key}") + + return construct_type(value=value, type_=type_) + + +def is_basemodel(type_: type) -> bool: + """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" + if is_union(type_): + for variant in get_args(type_): + if is_basemodel(variant): + return True + + return False + + return is_basemodel_type(type_) + + +def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: + origin = get_origin(type_) or type_ + if not inspect.isclass(origin): + return False + return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) + + +def build( + base_model_cls: Callable[P, _BaseModelT], + *args: P.args, + **kwargs: P.kwargs, +) -> _BaseModelT: + """Construct a BaseModel class without validation. + + This is useful for cases where you need to instantiate a `BaseModel` + from an API response as this provides type-safe params which isn't supported + by helpers like `construct_type()`. + + ```py + build(MyModel, my_field_a="foo", my_field_b=123) + ``` + """ + if args: + raise TypeError( + "Received positional arguments which are not supported; Keyword arguments must be used instead", + ) + + return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) + + +def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: + """Loose coercion to the expected type with construction of nested values. + + Note: the returned value from this function is not guaranteed to match the + given type. + """ + return cast(_T, construct_type(value=value, type_=type_)) + + +def construct_type(*, value: object, type_: object) -> object: + """Loose coercion to the expected type with construction of nested values. + + If the given value does not match the expected type then it is returned as-is. + """ + + # store a reference to the original type we were given before we extract any inner + # types so that we can properly resolve forward references in `TypeAliasType` annotations + original_type = None + + # we allow `object` as the input type because otherwise, passing things like + # `Literal['value']` will be reported as a type error by type checkers + type_ = cast("type[object]", type_) + if is_type_alias_type(type_): + original_type = type_ # type: ignore[unreachable] + type_ = type_.__value__ # type: ignore[unreachable] + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(type_): + meta: tuple[Any, ...] = get_args(type_)[1:] + type_ = extract_type_arg(type_, 0) + else: + meta = tuple() + + # we need to use the origin class for any types that are subscripted generics + # e.g. 
Dict[str, object] + origin = get_origin(type_) or type_ + args = get_args(type_) + + if is_union(origin): + try: + return validate_type(type_=cast("type[object]", original_type or type_), value=value) + except Exception: + pass + + # if the type is a discriminated union then we want to construct the right variant + # in the union, even if the data doesn't match exactly, otherwise we'd break code + # that relies on the constructed class types, e.g. + # + # class FooType: + # kind: Literal['foo'] + # value: str + # + # class BarType: + # kind: Literal['bar'] + # value: int + # + # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then + # we'd end up constructing `FooType` when it should be `BarType`. + discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta) + if discriminator and is_mapping(value): + variant_value = value.get(discriminator.field_alias_from or discriminator.field_name) + if variant_value and isinstance(variant_value, str): + variant_type = discriminator.mapping.get(variant_value) + if variant_type: + return construct_type(type_=variant_type, value=value) + + # if the data is not valid, use the first variant that doesn't fail while deserializing + for variant in args: + try: + return construct_type(value=value, type_=variant) + except Exception: + continue + + raise RuntimeError(f"Could not convert data into a valid instance of {type_}") + + if origin == dict: + if not is_mapping(value): + return value + + _, items_type = get_args(type_) # Dict[_, items_type] + return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} + + if ( + not is_literal_type(type_) + and inspect.isclass(origin) + and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)) + ): + if is_list(value): + return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] + + if is_mapping(value): + if issubclass(type_, BaseModel): + return type_.construct(**value) # type: ignore[arg-type] + + return cast(Any, type_).construct(**value) + + if origin == list: + if not is_list(value): + return value + + inner_type = args[0] # List[inner_type] + return [construct_type(value=entry, type_=inner_type) for entry in value] + + if origin == float: + if isinstance(value, int): + coerced = float(value) + if coerced != value: + return value + return coerced + + return value + + if type_ == datetime: + try: + return parse_datetime(value) # type: ignore + except Exception: + return value + + if type_ == date: + try: + return parse_date(value) # type: ignore + except Exception: + return value + + return value + + +@runtime_checkable +class CachedDiscriminatorType(Protocol): + __discriminator__: DiscriminatorDetails + + +class DiscriminatorDetails: + field_name: str + """The name of the discriminator field in the variant class, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] + ``` + + Will result in field_name='type' + """ + + field_alias_from: str | None + """The name of the discriminator field in the API response, e.g. + + ```py + class Foo(BaseModel): + type: Literal['foo'] = Field(alias='type_from_api') + ``` + + Will result in field_alias_from='type_from_api' + """ + + mapping: dict[str, type] + """Mapping of discriminator value to variant type, e.g. 
+ + {'foo': FooVariant, 'bar': BarVariant} + """ + + def __init__( + self, + *, + mapping: dict[str, type], + discriminator_field: str, + discriminator_alias: str | None, + ) -> None: + self.mapping = mapping + self.field_name = discriminator_field + self.field_alias_from = discriminator_alias + + +def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: + if isinstance(union, CachedDiscriminatorType): + return union.__discriminator__ + + discriminator_field_name: str | None = None + + for annotation in meta_annotations: + if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None: + discriminator_field_name = annotation.discriminator + break + + if not discriminator_field_name: + return None + + mapping: dict[str, type] = {} + discriminator_alias: str | None = None + + for variant in get_args(union): + variant = strip_annotated_type(variant) + if is_basemodel_type(variant): + if PYDANTIC_V2: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: + continue + + # Note: if one variant defines an alias then they all should + discriminator_alias = field.get("serialization_alias") + + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: + if isinstance(entry, str): + mapping[entry] = variant + else: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: + continue + + # Note: if one variant defines an alias then they all should + discriminator_alias = field_info.alias + + if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): + for entry in get_args(annotation): + if isinstance(entry, str): + mapping[entry] = variant + + if not mapping: + return None + + details = DiscriminatorDetails( + mapping=mapping, + discriminator_field=discriminator_field_name, + discriminator_alias=discriminator_alias, + ) + cast(CachedDiscriminatorType, union).__discriminator__ = details + return details + + +def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: + schema = model.__pydantic_core_schema__ + if schema["type"] == "definitions": + schema = schema["schema"] + + if schema["type"] != "model": + return None + + schema = cast("ModelSchema", schema) + fields_schema = schema["schema"] + if fields_schema["type"] != "model-fields": + return None + + fields_schema = cast("ModelFieldsSchema", fields_schema) + field = fields_schema["fields"].get(field_name) + if not field: + return None + + return cast("ModelField", field) # pyright: ignore[reportUnnecessaryCast] + + +def validate_type(*, type_: type[_T], value: object) -> _T: + """Strict validation that the given value matches the expected type""" + if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): + return cast(_T, parse_obj(type_, value)) + + return cast(_T, _validate_non_model_type(type_=type_, value=value)) + + +def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None: + """Add a pydantic config for the given type. + + Note: this is a no-op on Pydantic v1. 
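+
+    For example (a sketch; `MyModel` stands in for any type you control):
+
+    ```py
+    set_pydantic_config(MyModel, pydantic.ConfigDict(arbitrary_types_allowed=True))
+    ```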
+ """ + setattr(typ, "__pydantic_config__", config) # noqa: B010 + + +# our use of subclassing here causes weirdness for type checkers, +# so we just pretend that we don't subclass +if TYPE_CHECKING: + GenericModel = BaseModel +else: + + class GenericModel(BaseGenericModel, BaseModel): + pass + + +if PYDANTIC_V2: + from pydantic import TypeAdapter as _TypeAdapter + + _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter)) + + if TYPE_CHECKING: + from pydantic import TypeAdapter + else: + TypeAdapter = _CachedTypeAdapter + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + return TypeAdapter(type_).validate_python(value) + +elif not TYPE_CHECKING: # TODO: condition is weird + + class RootModel(GenericModel, Generic[_T]): + """Used as a placeholder to easily convert runtime types to a Pydantic format + to provide validation. + + For example: + ```py + validated = RootModel[int](__root__="5").__root__ + # validated: 5 + ``` + """ + + __root__: _T + + def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: + model = _create_pydantic_model(type_).validate(value) + return cast(_T, model.__root__) + + def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]: + return RootModel[type_] # type: ignore + + +class FinalRequestOptionsInput(TypedDict, total=False): + method: Required[str] + url: Required[str] + params: Query + headers: Headers + max_retries: int + timeout: float | Timeout | None + files: HttpxRequestFiles | None + idempotency_key: str + json_data: Body + extra_json: AnyMapping + + +@final +class FinalRequestOptions(pydantic.BaseModel): + method: str + url: str + params: Query = {} + headers: Union[Headers, NotGiven] = NotGiven() + max_retries: Union[int, NotGiven] = NotGiven() + timeout: Union[float, Timeout, None, NotGiven] = NotGiven() + files: Union[HttpxRequestFiles, None] = None + idempotency_key: Union[str, None] = None + post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + + # It should be noted that we cannot use `json` here as that would override + # a BaseModel method in an incompatible fashion. + json_data: Union[Body, None] = None + extra_json: Union[AnyMapping, None] = None + + if PYDANTIC_V2: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) + else: + + class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] + arbitrary_types_allowed: bool = True + + def get_max_retries(self, max_retries: int) -> int: + if isinstance(self.max_retries, NotGiven): + return max_retries + return self.max_retries + + def _strip_raw_response_header(self) -> None: + if not is_given(self.headers): + return + + if self.headers.get(RAW_RESPONSE_HEADER): + self.headers = {**self.headers} + self.headers.pop(RAW_RESPONSE_HEADER) + + # override the `construct` method so that we can run custom transformations. 
+ # this is necessary as we don't want to do any actual runtime type checking + # (which means we can't use validators) but we do want to ensure that `NotGiven` + # values are not present + # + # type ignore required because we're adding explicit types to `**values` + @classmethod + def construct( # type: ignore + cls, + _fields_set: set[str] | None = None, + **values: Unpack[FinalRequestOptionsInput], + ) -> FinalRequestOptions: + kwargs: dict[str, Any] = { + # we unconditionally call `strip_not_given` on any value + # as it will just ignore any non-mapping types + key: strip_not_given(value) + for key, value in values.items() + } + if PYDANTIC_V2: + return super().model_construct(_fields_set, **kwargs) + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + + if not TYPE_CHECKING: + # type checkers incorrectly complain about this assignment + model_construct = construct diff --git a/src/digitalocean_genai_sdk/_qs.py b/src/digitalocean_genai_sdk/_qs.py new file mode 100644 index 00000000..274320ca --- /dev/null +++ b/src/digitalocean_genai_sdk/_qs.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from typing import Any, List, Tuple, Union, Mapping, TypeVar +from urllib.parse import parse_qs, urlencode +from typing_extensions import Literal, get_args + +from ._types import NOT_GIVEN, NotGiven, NotGivenOr +from ._utils import flatten + +_T = TypeVar("_T") + + +ArrayFormat = Literal["comma", "repeat", "indices", "brackets"] +NestedFormat = Literal["dots", "brackets"] + +PrimitiveData = Union[str, int, float, bool, None] +# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"] +# https://github.com/microsoft/pyright/issues/3555 +Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"] +Params = Mapping[str, Data] + + +class Querystring: + array_format: ArrayFormat + nested_format: NestedFormat + + def __init__( + self, + *, + array_format: ArrayFormat = "repeat", + nested_format: NestedFormat = "brackets", + ) -> None: + self.array_format = array_format + self.nested_format = nested_format + + def parse(self, query: str) -> Mapping[str, object]: + # Note: custom format syntax is not supported yet + return parse_qs(query) + + def stringify( + self, + params: Params, + *, + array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, + nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + ) -> str: + return urlencode( + self.stringify_items( + params, + array_format=array_format, + nested_format=nested_format, + ) + ) + + def stringify_items( + self, + params: Params, + *, + array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, + nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + ) -> list[tuple[str, str]]: + opts = Options( + qs=self, + array_format=array_format, + nested_format=nested_format, + ) + return flatten([self._stringify_item(key, value, opts) for key, value in params.items()]) + + def _stringify_item( + self, + key: str, + value: Data, + opts: Options, + ) -> list[tuple[str, str]]: + if isinstance(value, Mapping): + items: list[tuple[str, str]] = [] + nested_format = opts.nested_format + for subkey, subvalue in value.items(): + items.extend( + self._stringify_item( + # TODO: error if unknown format + f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]", + subvalue, + opts, + ) + ) + return items + + if isinstance(value, (list, tuple)): + array_format = opts.array_format + if array_format == "comma": + return [ + ( + key, + 
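+                        # e.g. (a sketch) {"ids": [1, 2]} serialises to the
+                        # single pair ("ids", "1,2"), i.e. "ids=1%2C2" once
+                        # URL-encoded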
",".join(self._primitive_value_to_str(item) for item in value if item is not None), + ), + ] + elif array_format == "repeat": + items = [] + for item in value: + items.extend(self._stringify_item(key, item, opts)) + return items + elif array_format == "indices": + raise NotImplementedError("The array indices format is not supported yet") + elif array_format == "brackets": + items = [] + key = key + "[]" + for item in value: + items.extend(self._stringify_item(key, item, opts)) + return items + else: + raise NotImplementedError( + f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}" + ) + + serialised = self._primitive_value_to_str(value) + if not serialised: + return [] + return [(key, serialised)] + + def _primitive_value_to_str(self, value: PrimitiveData) -> str: + # copied from httpx + if value is True: + return "true" + elif value is False: + return "false" + elif value is None: + return "" + return str(value) + + +_qs = Querystring() +parse = _qs.parse +stringify = _qs.stringify +stringify_items = _qs.stringify_items + + +class Options: + array_format: ArrayFormat + nested_format: NestedFormat + + def __init__( + self, + qs: Querystring = _qs, + *, + array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, + nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + ) -> None: + self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format + self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format diff --git a/src/digitalocean_genai_sdk/_resource.py b/src/digitalocean_genai_sdk/_resource.py new file mode 100644 index 00000000..fe43ec28 --- /dev/null +++ b/src/digitalocean_genai_sdk/_resource.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import time +from typing import TYPE_CHECKING + +import anyio + +if TYPE_CHECKING: + from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + + +class SyncAPIResource: + _client: DigitaloceanGenaiSDK + + def __init__(self, client: DigitaloceanGenaiSDK) -> None: + self._client = client + self._get = client.get + self._post = client.post + self._patch = client.patch + self._put = client.put + self._delete = client.delete + self._get_api_list = client.get_api_list + + def _sleep(self, seconds: float) -> None: + time.sleep(seconds) + + +class AsyncAPIResource: + _client: AsyncDigitaloceanGenaiSDK + + def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + self._client = client + self._get = client.get + self._post = client.post + self._patch = client.patch + self._put = client.put + self._delete = client.delete + self._get_api_list = client.get_api_list + + async def _sleep(self, seconds: float) -> None: + await anyio.sleep(seconds) diff --git a/src/digitalocean_genai_sdk/_response.py b/src/digitalocean_genai_sdk/_response.py new file mode 100644 index 00000000..7f1fff1d --- /dev/null +++ b/src/digitalocean_genai_sdk/_response.py @@ -0,0 +1,832 @@ +from __future__ import annotations + +import os +import inspect +import logging +import datetime +import functools +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Union, + Generic, + TypeVar, + Callable, + Iterator, + AsyncIterator, + cast, + overload, +) +from typing_extensions import Awaitable, ParamSpec, override, get_origin + +import anyio +import httpx +import pydantic + +from ._types import NoneType +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base +from ._models import BaseModel, is_basemodel +from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER +from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type +from ._exceptions import DigitaloceanGenaiSDKError, APIResponseValidationError + +if TYPE_CHECKING: + from ._models import FinalRequestOptions + from ._base_client import BaseClient + + +P = ParamSpec("P") +R = TypeVar("R") +_T = TypeVar("_T") +_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]") +_AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]") + +log: logging.Logger = logging.getLogger(__name__) + + +class BaseAPIResponse(Generic[R]): + _cast_to: type[R] + _client: BaseClient[Any, Any] + _parsed_by_type: dict[type[Any], Any] + _is_sse_stream: bool + _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None + _options: FinalRequestOptions + + http_response: httpx.Response + + retries_taken: int + """The number of retries made. 
If no retries happened this will be `0`""" + + def __init__( + self, + *, + raw: httpx.Response, + cast_to: type[R], + client: BaseClient[Any, Any], + stream: bool, + stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, + options: FinalRequestOptions, + retries_taken: int = 0, + ) -> None: + self._cast_to = cast_to + self._client = client + self._parsed_by_type = {} + self._is_sse_stream = stream + self._stream_cls = stream_cls + self._options = options + self.http_response = raw + self.retries_taken = retries_taken + + @property + def headers(self) -> httpx.Headers: + return self.http_response.headers + + @property + def http_request(self) -> httpx.Request: + """Returns the httpx Request instance associated with the current response.""" + return self.http_response.request + + @property + def status_code(self) -> int: + return self.http_response.status_code + + @property + def url(self) -> httpx.URL: + """Returns the URL for which the request was made.""" + return self.http_response.url + + @property + def method(self) -> str: + return self.http_request.method + + @property + def http_version(self) -> str: + return self.http_response.http_version + + @property + def elapsed(self) -> datetime.timedelta: + """The time taken for the complete request/response cycle to complete.""" + return self.http_response.elapsed + + @property + def is_closed(self) -> bool: + """Whether or not the response body has been closed. + + If this is False then there is response data that has not been read yet. + You must either fully consume the response body or call `.close()` + before discarding the response to prevent resource leaks. + """ + return self.http_response.is_closed + + @override + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>" + ) + + def _parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + + # unwrap `Annotated[T, ...]` -> `T` + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) + + origin = get_origin(cast_to) or cast_to + + if self._is_sse_stream: + if to: + if not is_stream_class_type(to): + raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}") + + return cast( + _T, + to( + cast_to=extract_stream_chunk_type( + to, + failure_message="Expected custom stream type to be passed with a type argument, e.g. 
Stream[ChunkType]",
+                        ),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            if self._stream_cls:
+                return cast(
+                    R,
+                    self._stream_cls(
+                        cast_to=extract_stream_chunk_type(self._stream_cls),
+                        response=self.http_response,
+                        client=cast(Any, self._client),
+                    ),
+                )
+
+            stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
+            if stream_cls is None:
+                raise MissingStreamClassError()
+
+            return cast(
+                R,
+                stream_cls(
+                    cast_to=cast_to,
+                    response=self.http_response,
+                    client=cast(Any, self._client),
+                ),
+            )
+
+        if cast_to is NoneType:
+            return cast(R, None)
+
+        response = self.http_response
+        if cast_to == str:
+            return cast(R, response.text)
+
+        if cast_to == bytes:
+            return cast(R, response.content)
+
+        if cast_to == int:
+            return cast(R, int(response.text))
+
+        if cast_to == float:
+            return cast(R, float(response.text))
+
+        if cast_to == bool:
+            return cast(R, response.text.lower() == "true")
+
+        if origin == APIResponse:
+            raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
+
+        if inspect.isclass(origin) and issubclass(origin, httpx.Response):
+            # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
+            # and pass that class to our request functions. We cannot change the variance to be either
+            # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
+            # the response class ourselves but that is something that should be supported directly in httpx
+            # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
+            if cast_to != httpx.Response:
+                raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_to`")
+            return cast(R, response)
+
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
+            raise TypeError(
+                "Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`"
+            )
+
+        if (
+            cast_to is not object
+            and origin is not list
+            and origin is not dict
+            and origin is not Union
+            and not issubclass(origin, BaseModel)
+        ):
+            raise RuntimeError(
+                f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
+            )
+
+        # split is required to handle cases where additional information is included
+        # in the response, e.g. application/json; charset=utf-8
+        content_type, *_ = response.headers.get("content-type", "*").split(";")
+        if not content_type.endswith("json"):
+            if is_basemodel(cast_to):
+                try:
+                    data = response.json()
+                except Exception as exc:
+                    log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
+                else:
+                    return self._client._process_response_data(
+                        data=data,
+                        cast_to=cast_to,  # type: ignore
+                        response=response,
+                    )
+
+            if self._client._strict_response_validation:
+                raise APIResponseValidationError(
+                    response=response,
+                    message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
+                    body=response.text,
+                )
+
+            # If the API responds with content that isn't JSON then we just return
+            # the (decoded) text without performing any parsing so that you can still
+            # handle the response however you need to.
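+            # (strict response validation was handled just above, so reaching this
+            # point means the client is running in lenient mode)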
+ return response.text # type: ignore + + data = response.json() + + return self._client._process_response_data( + data=data, + cast_to=cast_to, # type: ignore + response=response, + ) + + +class APIResponse(BaseAPIResponse[R]): + @overload + def parse(self, *, to: type[_T]) -> _T: ... + + @overload + def parse(self) -> R: ... + + def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. + + ```py + from digitalocean_genai_sdk import BaseModel + + + class MyModel(BaseModel): + foo: str + + + obj = response.parse(to=MyModel) + print(obj.foo) + ``` + + We support parsing: + - `BaseModel` + - `dict` + - `list` + - `Union` + - `str` + - `int` + - `float` + - `httpx.Response` + """ + cache_key = to if to is not None else self._cast_to + cached = self._parsed_by_type.get(cache_key) + if cached is not None: + return cached # type: ignore[no-any-return] + + if not self._is_sse_stream: + self.read() + + parsed = self._parse(to=to) + if is_given(self._options.post_parser): + parsed = self._options.post_parser(parsed) + + self._parsed_by_type[cache_key] = parsed + return parsed + + def read(self) -> bytes: + """Read and return the binary response content.""" + try: + return self.http_response.read() + except httpx.StreamConsumed as exc: + # The default error raised by httpx isn't very + # helpful in our case so we re-raise it with + # a different error message. + raise StreamAlreadyConsumed() from exc + + def text(self) -> str: + """Read and decode the response content into a string.""" + self.read() + return self.http_response.text + + def json(self) -> object: + """Read and decode the JSON response content.""" + self.read() + return self.http_response.json() + + def close(self) -> None: + """Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + self.http_response.close() + + def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: + """ + A byte-iterator over the decoded response content. + + This automatically handles gzip, deflate and brotli encoded responses. + """ + for chunk in self.http_response.iter_bytes(chunk_size): + yield chunk + + def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: + """A str-iterator over the decoded response content + that handles both gzip, deflate, etc but also detects the content's + string encoding. + """ + for chunk in self.http_response.iter_text(chunk_size): + yield chunk + + def iter_lines(self) -> Iterator[str]: + """Like `iter_text()` but will only yield chunks for each line""" + for chunk in self.http_response.iter_lines(): + yield chunk + + +class AsyncAPIResponse(BaseAPIResponse[R]): + @overload + async def parse(self, *, to: type[_T]) -> _T: ... + + @overload + async def parse(self) -> R: ... + + async def parse(self, *, to: type[_T] | None = None) -> R | _T: + """Returns the rich python representation of this response's data. + + For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. + + You can customise the type that the response is parsed into through + the `to` argument, e.g. 
+
+        ```py
+        from digitalocean_genai_sdk import BaseModel
+
+
+        class MyModel(BaseModel):
+            foo: str
+
+
+        obj = response.parse(to=MyModel)
+        print(obj.foo)
+        ```
+
+        We support parsing:
+          - `BaseModel`
+          - `dict`
+          - `list`
+          - `Union`
+          - `str`
+          - `int`
+          - `float`
+          - `httpx.Response`
+        """
+        cache_key = to if to is not None else self._cast_to
+        cached = self._parsed_by_type.get(cache_key)
+        if cached is not None:
+            return cached  # type: ignore[no-any-return]
+
+        if not self._is_sse_stream:
+            await self.read()
+
+        parsed = self._parse(to=to)
+        if is_given(self._options.post_parser):
+            parsed = self._options.post_parser(parsed)
+
+        self._parsed_by_type[cache_key] = parsed
+        return parsed
+
+    async def read(self) -> bytes:
+        """Read and return the binary response content."""
+        try:
+            return await self.http_response.aread()
+        except httpx.StreamConsumed as exc:
+            # the default error raised by httpx isn't very
+            # helpful in our case so we re-raise it with
+            # a different error message
+            raise StreamAlreadyConsumed() from exc
+
+    async def text(self) -> str:
+        """Read and decode the response content into a string."""
+        await self.read()
+        return self.http_response.text
+
+    async def json(self) -> object:
+        """Read and decode the JSON response content."""
+        await self.read()
+        return self.http_response.json()
+
+    async def close(self) -> None:
+        """Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self.http_response.aclose()
+
+    async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
+        """
+        A byte-iterator over the decoded response content.
+
+        This automatically handles gzip, deflate and brotli encoded responses.
+        """
+        async for chunk in self.http_response.aiter_bytes(chunk_size):
+            yield chunk
+
+    async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
+        """A str-iterator over the decoded response content
+        that handles both gzip, deflate, etc but also detects the content's
+        string encoding.
+        """
+        async for chunk in self.http_response.aiter_text(chunk_size):
+            yield chunk
+
+    async def iter_lines(self) -> AsyncIterator[str]:
+        """Like `iter_text()` but will only yield chunks for each line"""
+        async for chunk in self.http_response.aiter_lines():
+            yield chunk
+
+
+class BinaryAPIResponse(APIResponse[bytes]):
+    """Subclass of APIResponse providing helpers for dealing with binary data.
+
+    Note: If you want to stream the response data instead of eagerly reading it
+    all at once then you should use `.with_streaming_response` when making
+    the API request, e.g. `.with_streaming_response.get_binary_response()`
+    """
+
+    def write_to_file(
+        self,
+        file: str | os.PathLike[str],
+    ) -> None:
+        """Write the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+
+        Note: if you want to stream the data to the file instead of writing
+        all at once then you should use `.with_streaming_response` when making
+        the API request, e.g. `.with_streaming_response.get_binary_response()`
+        """
+        with open(file, mode="wb") as f:
+            for data in self.iter_bytes():
+                f.write(data)
+
+
+class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]):
+    """Subclass of APIResponse providing helpers for dealing with binary data.
+
+    Note: If you want to stream the response data instead of eagerly reading it
+    all at once then you should use `.with_streaming_response` when making
+    the API request, e.g. `.with_streaming_response.get_binary_response()`
+    """
+
+    async def write_to_file(
+        self,
+        file: str | os.PathLike[str],
+    ) -> None:
+        """Write the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+
+        Note: if you want to stream the data to the file instead of writing
+        all at once then you should use `.with_streaming_response` when making
+        the API request, e.g. `.with_streaming_response.get_binary_response()`
+        """
+        path = anyio.Path(file)
+        async with await path.open(mode="wb") as f:
+            async for data in self.iter_bytes():
+                await f.write(data)
+
+
+class StreamedBinaryAPIResponse(APIResponse[bytes]):
+    def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        """Streams the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+        """
+        with open(file, mode="wb") as f:
+            for data in self.iter_bytes(chunk_size):
+                f.write(data)
+
+
+class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]):
+    async def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
+        """Streams the output to the given file.
+
+        Accepts a filename or any path-like object, e.g. pathlib.Path
+        """
+        path = anyio.Path(file)
+        async with await path.open(mode="wb") as f:
+            async for data in self.iter_bytes(chunk_size):
+                await f.write(data)
+
+
+class MissingStreamClassError(TypeError):
+    def __init__(self) -> None:
+        super().__init__(
+            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `digitalocean_genai_sdk._streaming` for reference",
+        )
+
+
+class StreamAlreadyConsumed(DigitaloceanGenaiSDKError):
+    """
+    Attempted to read or stream content, but the content has already
+    been streamed.
+
+    This can happen if you use a method like `.iter_lines()` and then attempt
+    to read the entire response body afterwards, e.g.
+
+    ```py
+    response = await client.post(...)
+    async for line in response.iter_lines():
+        ...  # do something with `line`
+
+    content = await response.read()
+    # ^ error
+    ```
+
+    If you want this behaviour you'll need to either manually accumulate the response
+    content or call `await response.read()` before iterating over the stream.
+    """
+
+    def __init__(self) -> None:
+        message = (
+            "Attempted to read or stream some content, but the content has "
+            "already been streamed. "
+            "This could be due to attempting to stream the response "
+            "content more than once."
+            "\n\n"
+            "You can fix this by manually accumulating the response content while streaming "
+            "or by calling `.read()` before starting to stream."
+ ) + super().__init__(message) + + +class ResponseContextManager(Generic[_APIResponseT]): + """Context manager for ensuring that a request is not made + until it is entered and that the response will always be closed + when the context manager exits + """ + + def __init__(self, request_func: Callable[[], _APIResponseT]) -> None: + self._request_func = request_func + self.__response: _APIResponseT | None = None + + def __enter__(self) -> _APIResponseT: + self.__response = self._request_func() + return self.__response + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if self.__response is not None: + self.__response.close() + + +class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]): + """Context manager for ensuring that a request is not made + until it is entered and that the response will always be closed + when the context manager exits + """ + + def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None: + self._api_request = api_request + self.__response: _AsyncAPIResponseT | None = None + + async def __aenter__(self) -> _AsyncAPIResponseT: + self.__response = await self._api_request + return self.__response + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if self.__response is not None: + await self.__response.close() + + +def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support streaming and returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + + kwargs["extra_headers"] = extra_headers + + make_request = functools.partial(func, *args, **kwargs) + + return ResponseContextManager(cast(Callable[[], APIResponse[R]], make_request)) + + return wrapped + + +def async_to_streamed_response_wrapper( + func: Callable[P, Awaitable[R]], +) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support streaming and returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + + kwargs["extra_headers"] = extra_headers + + make_request = func(*args, **kwargs) + + return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], make_request)) + + return wrapped + + +def to_custom_streamed_response_wrapper( + func: Callable[P, object], + response_cls: type[_APIResponseT], +) -> Callable[P, ResponseContextManager[_APIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support streaming and returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + make_request = functools.partial(func, *args, **kwargs) + + return ResponseContextManager(cast(Callable[[], _APIResponseT], make_request)) + + return wrapped + + +def async_to_custom_streamed_response_wrapper( + func: Callable[P, Awaitable[object]], + response_cls: type[_AsyncAPIResponseT], +) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support streaming and returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "stream" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + make_request = func(*args, **kwargs) + + return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], make_request)) + + return wrapped + + +def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]: + """Higher order function that takes one of our bound API methods and wraps it + to support returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + + kwargs["extra_headers"] = extra_headers + + return cast(APIResponse[R], func(*args, **kwargs)) + + return wrapped + + +def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]: + """Higher order function that takes one of our bound API methods and wraps it + to support returning the raw `APIResponse` object directly. + """ + + @functools.wraps(func) + async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]: + extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + + kwargs["extra_headers"] = extra_headers + + return cast(AsyncAPIResponse[R], await func(*args, **kwargs)) + + return wrapped + + +def to_custom_raw_response_wrapper( + func: Callable[P, object], + response_cls: type[_APIResponseT], +) -> Callable[P, _APIResponseT]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + return cast(_APIResponseT, func(*args, **kwargs)) + + return wrapped + + +def async_to_custom_raw_response_wrapper( + func: Callable[P, Awaitable[object]], + response_cls: type[_AsyncAPIResponseT], +) -> Callable[P, Awaitable[_AsyncAPIResponseT]]: + """Higher order function that takes one of our bound API methods and an `APIResponse` class + and wraps the method to support returning the given response class directly. + + Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` + """ + + @functools.wraps(func) + def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]: + extra_headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})} + extra_headers[RAW_RESPONSE_HEADER] = "raw" + extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls + + kwargs["extra_headers"] = extra_headers + + return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs)) + + return wrapped + + +def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type: + """Given a type like `APIResponse[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyResponse(APIResponse[bytes]): + ... + + extract_response_type(MyResponse) -> bytes + ``` + """ + return extract_type_var_from_base( + typ, + generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse)), + index=0, + ) diff --git a/src/digitalocean_genai_sdk/_streaming.py b/src/digitalocean_genai_sdk/_streaming.py new file mode 100644 index 00000000..96c3f3d3 --- /dev/null +++ b/src/digitalocean_genai_sdk/_streaming.py @@ -0,0 +1,333 @@ +# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py +from __future__ import annotations + +import json +import inspect +from types import TracebackType +from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast +from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable + +import httpx + +from ._utils import extract_type_var_from_base + +if TYPE_CHECKING: + from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + + +_T = TypeVar("_T") + + +class Stream(Generic[_T]): + """Provides the core interface to iterate over a synchronous stream response.""" + + response: httpx.Response + + _decoder: SSEBytesDecoder + + def __init__( + self, + *, + cast_to: type[_T], + response: httpx.Response, + client: DigitaloceanGenaiSDK, + ) -> None: + self.response = response + self._cast_to = cast_to + self._client = client + self._decoder = client._make_sse_decoder() + self._iterator = self.__stream__() + + def __next__(self) -> _T: + return self._iterator.__next__() + + def __iter__(self) -> Iterator[_T]: + for item in self._iterator: + yield item + + def _iter_events(self) -> Iterator[ServerSentEvent]: + yield from self._decoder.iter_bytes(self.response.iter_bytes()) + + def __stream__(self) -> Iterator[_T]: + cast_to = cast(Any, self._cast_to) + response = self.response + process_data = self._client._process_response_data + iterator = 
self._iter_events() + + for sse in iterator: + yield process_data(data=sse.json(), cast_to=cast_to, response=response) + + # Ensure the entire stream is consumed + for _sse in iterator: + ... + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + self.close() + + def close(self) -> None: + """ + Close the response and release the connection. + + Automatically called if the response body is read to completion. + """ + self.response.close() + + +class AsyncStream(Generic[_T]): + """Provides the core interface to iterate over an asynchronous stream response.""" + + response: httpx.Response + + _decoder: SSEDecoder | SSEBytesDecoder + + def __init__( + self, + *, + cast_to: type[_T], + response: httpx.Response, + client: AsyncDigitaloceanGenaiSDK, + ) -> None: + self.response = response + self._cast_to = cast_to + self._client = client + self._decoder = client._make_sse_decoder() + self._iterator = self.__stream__() + + async def __anext__(self) -> _T: + return await self._iterator.__anext__() + + async def __aiter__(self) -> AsyncIterator[_T]: + async for item in self._iterator: + yield item + + async def _iter_events(self) -> AsyncIterator[ServerSentEvent]: + async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()): + yield sse + + async def __stream__(self) -> AsyncIterator[_T]: + cast_to = cast(Any, self._cast_to) + response = self.response + process_data = self._client._process_response_data + iterator = self._iter_events() + + async for sse in iterator: + yield process_data(data=sse.json(), cast_to=cast_to, response=response) + + # Ensure the entire stream is consumed + async for _sse in iterator: + ... + + async def __aenter__(self) -> Self: + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + await self.close() + + async def close(self) -> None: + """ + Close the response and release the connection. + + Automatically called if the response body is read to completion. 
+ """ + await self.response.aclose() + + +class ServerSentEvent: + def __init__( + self, + *, + event: str | None = None, + data: str | None = None, + id: str | None = None, + retry: int | None = None, + ) -> None: + if data is None: + data = "" + + self._id = id + self._data = data + self._event = event or None + self._retry = retry + + @property + def event(self) -> str | None: + return self._event + + @property + def id(self) -> str | None: + return self._id + + @property + def retry(self) -> int | None: + return self._retry + + @property + def data(self) -> str: + return self._data + + def json(self) -> Any: + return json.loads(self.data) + + @override + def __repr__(self) -> str: + return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})" + + +class SSEDecoder: + _data: list[str] + _event: str | None + _retry: int | None + _last_event_id: str | None + + def __init__(self) -> None: + self._event = None + self._data = [] + self._last_event_id = None + self._retry = None + + def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + for chunk in self._iter_chunks(iterator): + # Split before decoding so splitlines() only uses \r and \n + for raw_line in chunk.splitlines(): + line = raw_line.decode("utf-8") + sse = self.decode(line) + if sse: + yield sse + + def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]: + """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks""" + data = b"" + for chunk in iterator: + for line in chunk.splitlines(keepends=True): + data += line + if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")): + yield data + data = b"" + if data: + yield data + + async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + async for chunk in self._aiter_chunks(iterator): + # Split before decoding so splitlines() only uses \r and \n + for raw_line in chunk.splitlines(): + line = raw_line.decode("utf-8") + sse = self.decode(line) + if sse: + yield sse + + async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]: + """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks""" + data = b"" + async for chunk in iterator: + for line in chunk.splitlines(keepends=True): + data += line + if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")): + yield data + data = b"" + if data: + yield data + + def decode(self, line: str) -> ServerSentEvent | None: + # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501 + + if not line: + if not self._event and not self._data and not self._last_event_id and self._retry is None: + return None + + sse = ServerSentEvent( + event=self._event, + data="\n".join(self._data), + id=self._last_event_id, + retry=self._retry, + ) + + # NOTE: as per the SSE spec, do not reset last_event_id. 
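+            # the event type, data buffer and retry interval are cleared so that
+            # the next event starts from a clean slate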
+ self._event = None + self._data = [] + self._retry = None + + return sse + + if line.startswith(":"): + return None + + fieldname, _, value = line.partition(":") + + if value.startswith(" "): + value = value[1:] + + if fieldname == "event": + self._event = value + elif fieldname == "data": + self._data.append(value) + elif fieldname == "id": + if "\0" in value: + pass + else: + self._last_event_id = value + elif fieldname == "retry": + try: + self._retry = int(value) + except (TypeError, ValueError): + pass + else: + pass # Field is ignored. + + return None + + +@runtime_checkable +class SSEBytesDecoder(Protocol): + def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]: + """Given an iterator that yields raw binary data, iterate over it & yield every event encountered""" + ... + + def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]: + """Given an async iterator that yields raw binary data, iterate over it & yield every event encountered""" + ... + + +def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]: + """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`""" + origin = get_origin(typ) or typ + return inspect.isclass(origin) and issubclass(origin, (Stream, AsyncStream)) + + +def extract_stream_chunk_type( + stream_cls: type, + *, + failure_message: str | None = None, +) -> type: + """Given a type like `Stream[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyStream(Stream[bytes]): + ... + + extract_stream_chunk_type(MyStream) -> bytes + ``` + """ + from ._base_client import Stream, AsyncStream + + return extract_type_var_from_base( + stream_cls, + index=0, + generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)), + failure_message=failure_message, + ) diff --git a/src/digitalocean_genai_sdk/_types.py b/src/digitalocean_genai_sdk/_types.py new file mode 100644 index 00000000..b2bfbbec --- /dev/null +++ b/src/digitalocean_genai_sdk/_types.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +from os import PathLike +from typing import ( + IO, + TYPE_CHECKING, + Any, + Dict, + List, + Type, + Tuple, + Union, + Mapping, + TypeVar, + Callable, + Optional, + Sequence, +) +from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable + +import httpx +import pydantic +from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport + +if TYPE_CHECKING: + from ._models import BaseModel + from ._response import APIResponse, AsyncAPIResponse + +Transport = BaseTransport +AsyncTransport = AsyncBaseTransport +Query = Mapping[str, object] +Body = object +AnyMapping = Mapping[str, object] +ModelT = TypeVar("ModelT", bound=pydantic.BaseModel) +_T = TypeVar("_T") + + +# Approximates httpx internal ProxiesTypes and RequestFiles types +# while adding support for `PathLike` instances +ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]] +ProxiesTypes = Union[str, Proxy, ProxiesDict] +if TYPE_CHECKING: + Base64FileInput = Union[IO[bytes], PathLike[str]] + FileContent = Union[IO[bytes], bytes, PathLike[str]] +else: + Base64FileInput = Union[IO[bytes], PathLike] + FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8. 
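+
+# e.g. raw `bytes`, an open binary file handle and `pathlib.Path("data.bin")` all
+# satisfy `FileContent` (the file name here is purely illustrative).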
+FileTypes = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], +] +RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]] + +# duplicate of the above but without our custom file support +HttpxFileContent = Union[IO[bytes], bytes] +HttpxFileTypes = Union[ + # file (or bytes) + HttpxFileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], HttpxFileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], HttpxFileContent, Optional[str]], + # (filename, file (or bytes), content_type, headers) + Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]], +] +HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]] + +# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT +# where ResponseT includes `None`. In order to support directly +# passing `None`, overloads would have to be defined for every +# method that uses `ResponseT` which would lead to an unacceptable +# amount of code duplication and make it unreadable. See _base_client.py +# for example usage. +# +# This unfortunately means that you will either have +# to import this type and pass it explicitly: +# +# from digitalocean_genai_sdk import NoneType +# client.get('/foo', cast_to=NoneType) +# +# or build it yourself: +# +# client.get('/foo', cast_to=type(None)) +if TYPE_CHECKING: + NoneType: Type[None] +else: + NoneType = type(None) + + +class RequestOptions(TypedDict, total=False): + headers: Headers + max_retries: int + timeout: float | Timeout | None + params: Query + extra_json: AnyMapping + idempotency_key: str + + +# Sentinel class used until PEP 0661 is accepted +class NotGiven: + """ + A sentinel singleton class used to distinguish omitted keyword arguments + from those passed in with the value None (which may have different behavior). + + For example: + + ```py + def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... + + + get(timeout=1) # 1s timeout + get(timeout=None) # No timeout + get() # Default timeout behavior, which may not be statically known at the method definition. 
+    ```
+    """
+
+    def __bool__(self) -> Literal[False]:
+        return False
+
+    @override
+    def __repr__(self) -> str:
+        return "NOT_GIVEN"
+
+
+NotGivenOr = Union[_T, NotGiven]
+NOT_GIVEN = NotGiven()
+
+
+class Omit:
+    """In certain situations you need to be able to represent a case where a default value has
+    to be explicitly removed and `None` is not an appropriate substitute, for example:
+
+    ```py
+    # by default the `Content-Type` header `application/json` will be sent
+    client.post("/upload/files", files={"file": b"my raw file content"})
+
+    # you can't explicitly override the header as it has to be dynamically generated
+    # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
+    client.post(..., headers={"Content-Type": "multipart/form-data"})
+
+    # instead you can remove the default `application/json` header by passing Omit
+    client.post(..., headers={"Content-Type": Omit()})
+    ```
+    """
+
+    def __bool__(self) -> Literal[False]:
+        return False
+
+
+@runtime_checkable
+class ModelBuilderProtocol(Protocol):
+    @classmethod
+    def build(
+        cls: type[_T],
+        *,
+        response: Response,
+        data: object,
+    ) -> _T: ...
+
+
+Headers = Mapping[str, Union[str, Omit]]
+
+
+class HeadersLikeProtocol(Protocol):
+    def get(self, __key: str) -> str | None: ...
+
+
+HeadersLike = Union[Headers, HeadersLikeProtocol]
+
+ResponseT = TypeVar(
+    "ResponseT",
+    bound=Union[
+        object,
+        str,
+        None,
+        "BaseModel",
+        List[Any],
+        Dict[str, Any],
+        Response,
+        ModelBuilderProtocol,
+        "APIResponse[Any]",
+        "AsyncAPIResponse[Any]",
+    ],
+)
+
+StrBytesIntFloat = Union[str, bytes, int, float]
+
+# Note: copied from Pydantic
+# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
+IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]
+
+PostParser = Callable[[Any], Any]
+
+
+@runtime_checkable
+class InheritsGeneric(Protocol):
+    """Represents a type that has inherited from `Generic`
+
+    The `__orig_bases__` property can be used to determine the resolved
+    type variable for a given base class.
+ """ + + __orig_bases__: tuple[_GenericAlias] + + +class _GenericAlias(Protocol): + __origin__: type[object] + + +class HttpxSendArgs(TypedDict, total=False): + auth: httpx.Auth diff --git a/src/digitalocean_genai_sdk/_utils/__init__.py b/src/digitalocean_genai_sdk/_utils/__init__.py new file mode 100644 index 00000000..d4fda26f --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/__init__.py @@ -0,0 +1,57 @@ +from ._sync import asyncify as asyncify +from ._proxy import LazyProxy as LazyProxy +from ._utils import ( + flatten as flatten, + is_dict as is_dict, + is_list as is_list, + is_given as is_given, + is_tuple as is_tuple, + json_safe as json_safe, + lru_cache as lru_cache, + is_mapping as is_mapping, + is_tuple_t as is_tuple_t, + parse_date as parse_date, + is_iterable as is_iterable, + is_sequence as is_sequence, + coerce_float as coerce_float, + is_mapping_t as is_mapping_t, + removeprefix as removeprefix, + removesuffix as removesuffix, + extract_files as extract_files, + is_sequence_t as is_sequence_t, + required_args as required_args, + coerce_boolean as coerce_boolean, + coerce_integer as coerce_integer, + file_from_path as file_from_path, + parse_datetime as parse_datetime, + strip_not_given as strip_not_given, + deepcopy_minimal as deepcopy_minimal, + get_async_library as get_async_library, + maybe_coerce_float as maybe_coerce_float, + get_required_header as get_required_header, + maybe_coerce_boolean as maybe_coerce_boolean, + maybe_coerce_integer as maybe_coerce_integer, +) +from ._typing import ( + is_list_type as is_list_type, + is_union_type as is_union_type, + extract_type_arg as extract_type_arg, + is_iterable_type as is_iterable_type, + is_required_type as is_required_type, + is_annotated_type as is_annotated_type, + is_type_alias_type as is_type_alias_type, + strip_annotated_type as strip_annotated_type, + extract_type_var_from_base as extract_type_var_from_base, +) +from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator +from ._transform import ( + PropertyInfo as PropertyInfo, + transform as transform, + async_transform as async_transform, + maybe_transform as maybe_transform, + async_maybe_transform as async_maybe_transform, +) +from ._reflection import ( + function_has_argument as function_has_argument, + assert_signatures_in_sync as assert_signatures_in_sync, +) diff --git a/src/digitalocean_genai_sdk/_utils/_logs.py b/src/digitalocean_genai_sdk/_utils/_logs.py new file mode 100644 index 00000000..e0c1fee5 --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/_logs.py @@ -0,0 +1,25 @@ +import os +import logging + +logger: logging.Logger = logging.getLogger("digitalocean_genai_sdk") +httpx_logger: logging.Logger = logging.getLogger("httpx") + + +def _basic_config() -> None: + # e.g. 
[2023-10-05 14:12:26 - digitalocean_genai_sdk._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + logging.basicConfig( + format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + +def setup_logging() -> None: + env = os.environ.get("DIGITALOCEAN_GENAI_SDK_LOG") + if env == "debug": + _basic_config() + logger.setLevel(logging.DEBUG) + httpx_logger.setLevel(logging.DEBUG) + elif env == "info": + _basic_config() + logger.setLevel(logging.INFO) + httpx_logger.setLevel(logging.INFO) diff --git a/src/digitalocean_genai_sdk/_utils/_proxy.py b/src/digitalocean_genai_sdk/_utils/_proxy.py new file mode 100644 index 00000000..0f239a33 --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/_proxy.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Generic, TypeVar, Iterable, cast +from typing_extensions import override + +T = TypeVar("T") + + +class LazyProxy(Generic[T], ABC): + """Implements data methods to pretend that an instance is another instance. + + This includes forwarding attribute access and other methods. + """ + + # Note: we have to special case proxies that themselves return proxies + # to support using a proxy as a catch-all for any random access, e.g. `proxy.foo.bar.baz` + + def __getattr__(self, attr: str) -> object: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied # pyright: ignore + return getattr(proxied, attr) + + @override + def __repr__(self) -> str: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied.__class__.__name__ + return repr(self.__get_proxied__()) + + @override + def __str__(self) -> str: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return proxied.__class__.__name__ + return str(proxied) + + @override + def __dir__(self) -> Iterable[str]: + proxied = self.__get_proxied__() + if isinstance(proxied, LazyProxy): + return [] + return proxied.__dir__() + + @property # type: ignore + @override + def __class__(self) -> type: # pyright: ignore + try: + proxied = self.__get_proxied__() + except Exception: + return type(self) + if issubclass(type(proxied), LazyProxy): + return type(proxied) + return proxied.__class__ + + def __get_proxied__(self) -> T: + return self.__load__() + + def __as_proxied__(self) -> T: + """Helper method that returns the current proxy, typed as the loaded object""" + return cast(T, self) + + @abstractmethod + def __load__(self) -> T: ... 
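+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only, not generated code): a subclass
+    # implements `__load__` and the proxy forwards attribute access to whatever
+    # it returns. Note that this base class does not cache, so `__load__` runs
+    # on every access unless the subclass caches the result itself.
+    class _LazyList(LazyProxy["list[int]"]):
+        @override
+        def __load__(self) -> "list[int]":
+            return [1, 2, 3]
+
+    nums = _LazyList().__as_proxied__()  # statically typed as list[int]
+    assert nums.count(2) == 1  # attribute access triggers __load__
+    assert repr(nums) == "[1, 2, 3]"  # repr() is forwarded to the loaded list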
diff --git a/src/digitalocean_genai_sdk/_utils/_reflection.py b/src/digitalocean_genai_sdk/_utils/_reflection.py
new file mode 100644
index 00000000..89aa712a
--- /dev/null
+++ b/src/digitalocean_genai_sdk/_utils/_reflection.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+import inspect
+from typing import Any, Callable
+
+
+def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool:
+    """Returns whether or not the given function has a specific parameter"""
+    sig = inspect.signature(func)
+    return arg_name in sig.parameters
+
+
+def assert_signatures_in_sync(
+    source_func: Callable[..., Any],
+    check_func: Callable[..., Any],
+    *,
+    exclude_params: set[str] = set(),
+) -> None:
+    """Ensure that the signature of the second function matches the first."""
+
+    check_sig = inspect.signature(check_func)
+    source_sig = inspect.signature(source_func)
+
+    errors: list[str] = []
+
+    for name, source_param in source_sig.parameters.items():
+        if name in exclude_params:
+            continue
+
+        custom_param = check_sig.parameters.get(name)
+        if not custom_param:
+            errors.append(f"the `{name}` param is missing")
+            continue
+
+        if custom_param.annotation != source_param.annotation:
+            errors.append(
+                f"types for the `{name}` param do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
+            )
+            continue
+
+    if errors:
+        raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors))
diff --git a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py b/src/digitalocean_genai_sdk/_utils/_resources_proxy.py
new file mode 100644
index 00000000..4ebaf7a4
--- /dev/null
+++ b/src/digitalocean_genai_sdk/_utils/_resources_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import override
+
+from ._proxy import LazyProxy
+
+
+class ResourcesProxy(LazyProxy[Any]):
+    """A proxy for the `digitalocean_genai_sdk.resources` module.
+
+    This is used so that we can lazily import `digitalocean_genai_sdk.resources` only when
+    needed *and* so that users can just import `digitalocean_genai_sdk` and reference `digitalocean_genai_sdk.resources`.
+    """
+
+    @override
+    def __load__(self) -> Any:
+        import importlib
+
+        mod = importlib.import_module("digitalocean_genai_sdk.resources")
+        return mod
+
+
+resources = ResourcesProxy().__as_proxied__()
diff --git a/src/digitalocean_genai_sdk/_utils/_streams.py b/src/digitalocean_genai_sdk/_utils/_streams.py
new file mode 100644
index 00000000..f4a0208f
--- /dev/null
+++ b/src/digitalocean_genai_sdk/_utils/_streams.py
@@ -0,0 +1,12 @@
+from typing import Any
+from typing_extensions import Iterator, AsyncIterator
+
+
+def consume_sync_iterator(iterator: Iterator[Any]) -> None:
+    for _ in iterator:
+        ...
+
+
+async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None:
+    async for _ in iterator:
+        ...
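+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only): these helpers drain an iterator
+    # purely for its side effects, e.g. making sure an SSE stream is fully
+    # consumed so the underlying HTTP connection can be released.
+    chunks = iter([b"chunk-1", b"chunk-2"])
+    consume_sync_iterator(chunks)
+    assert next(chunks, None) is None  # the iterator is now exhausted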
diff --git a/src/digitalocean_genai_sdk/_utils/_sync.py b/src/digitalocean_genai_sdk/_utils/_sync.py
new file mode 100644
index 00000000..ad7ec71b
--- /dev/null
+++ b/src/digitalocean_genai_sdk/_utils/_sync.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import sys
+import asyncio
+import functools
+import contextvars
+from typing import Any, TypeVar, Callable, Awaitable
+from typing_extensions import ParamSpec
+
+import anyio
+import sniffio
+import anyio.to_thread
+
+T_Retval = TypeVar("T_Retval")
+T_ParamSpec = ParamSpec("T_ParamSpec")
+
+
+if sys.version_info >= (3, 9):
+    _asyncio_to_thread = asyncio.to_thread
+else:
+    # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
+    # for Python 3.8 support
+    async def _asyncio_to_thread(
+        func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
+    ) -> Any:
+        """Asynchronously run function *func* in a separate thread.
+
+        Any *args and **kwargs supplied for this function are directly passed
+        to *func*. Also, the current :class:`contextvars.Context` is propagated,
+        allowing context variables from the main thread to be accessed in the
+        separate thread.
+
+        Returns a coroutine that can be awaited to get the eventual result of *func*.
+        """
+        loop = asyncio.events.get_running_loop()
+        ctx = contextvars.copy_context()
+        func_call = functools.partial(ctx.run, func, *args, **kwargs)
+        return await loop.run_in_executor(None, func_call)
+
+
+async def to_thread(
+    func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
+) -> T_Retval:
+    if sniffio.current_async_library() == "asyncio":
+        return await _asyncio_to_thread(func, *args, **kwargs)
+
+    return await anyio.to_thread.run_sync(
+        functools.partial(func, *args, **kwargs),
+    )
+
+
+# inspired by `asyncer`, https://github.com/tiangolo/asyncer
+def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
+    """
+    Take a blocking function and create an async one that receives the same
+    positional and keyword arguments. For Python 3.9 and above, it uses
+    asyncio.to_thread to run the function in a separate thread. For Python 3.8,
+    it uses a locally defined copy of the asyncio.to_thread function, which was
+    introduced in Python 3.9.
+
+    Usage:
+
+    ```python
+    def blocking_func(arg1, arg2, kwarg1=None):
+        # blocking code
+        return result
+
+
+    result = asyncify(blocking_func)(arg1, arg2, kwarg1=value1)
+    ```
+
+    ## Arguments
+
+    `function`: a blocking regular callable (e.g. a function)
+
+    ## Return
+
+    An async function that takes the same positional and keyword arguments as the
+    original one, that when called runs the same original function in a thread worker
+    and returns the result.
+ """ + + async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: + return await to_thread(function, *args, **kwargs) + + return wrapper diff --git a/src/digitalocean_genai_sdk/_utils/_transform.py b/src/digitalocean_genai_sdk/_utils/_transform.py new file mode 100644 index 00000000..b0cc20a7 --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/_transform.py @@ -0,0 +1,447 @@ +from __future__ import annotations + +import io +import base64 +import pathlib +from typing import Any, Mapping, TypeVar, cast +from datetime import date, datetime +from typing_extensions import Literal, get_args, override, get_type_hints as _get_type_hints + +import anyio +import pydantic + +from ._utils import ( + is_list, + is_given, + lru_cache, + is_mapping, + is_iterable, +) +from .._files import is_base64_file_input +from ._typing import ( + is_list_type, + is_union_type, + extract_type_arg, + is_iterable_type, + is_required_type, + is_annotated_type, + strip_annotated_type, +) +from .._compat import get_origin, model_dump, is_typeddict + +_T = TypeVar("_T") + + +# TODO: support for drilling globals() and locals() +# TODO: ensure works correctly with forward references in all cases + + +PropertyFormat = Literal["iso8601", "base64", "custom"] + + +class PropertyInfo: + """Metadata class to be used in Annotated types to provide information about a given type. + + For example: + + class MyParams(TypedDict): + account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')] + + This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API. + """ + + alias: str | None + format: PropertyFormat | None + format_template: str | None + discriminator: str | None + + def __init__( + self, + *, + alias: str | None = None, + format: PropertyFormat | None = None, + format_template: str | None = None, + discriminator: str | None = None, + ) -> None: + self.alias = alias + self.format = format + self.format_template = format_template + self.discriminator = discriminator + + @override + def __repr__(self) -> str: + return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')" + + +def maybe_transform( + data: object, + expected_type: object, +) -> Any | None: + """Wrapper over `transform()` that allows `None` to be passed. + + See `transform()` for more details. + """ + if data is None: + return None + return transform(data, expected_type) + + +# Wrapper over _transform_recursive providing fake types +def transform( + data: _T, + expected_type: object, +) -> _T: + """Transform dictionaries based off of type information from the given type, for example: + + ```py + class Params(TypedDict, total=False): + card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] + + + transformed = transform({"card_id": ""}, Params) + # {'cardID': ''} + ``` + + Any keys / data that does not have type information given will be included as is. + + It should be noted that the transformations that this function does are not represented in the type system. + """ + transformed = _transform_recursive(data, annotation=cast(type, expected_type)) + return cast(_T, transformed) + + +@lru_cache(maxsize=8096) +def _get_annotated_type(type_: type) -> type | None: + """If the given type is an `Annotated` type then it is returned, if not `None` is returned. + + This also unwraps the type when applicable, e.g. 
`Required[Annotated[T, ...]]` + """ + if is_required_type(type_): + # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]` + type_ = get_args(type_)[0] + + if is_annotated_type(type_): + return type_ + + return None + + +def _maybe_transform_key(key: str, type_: type) -> str: + """Transform the given `data` based on the annotations provided in `type_`. + + Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata. + """ + annotated_type = _get_annotated_type(type_) + if annotated_type is None: + # no `Annotated` definition for this type, no transformation needed + return key + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.alias is not None: + return annotation.alias + + return key + + +def _no_transform_needed(annotation: type) -> bool: + return annotation == float or annotation == int + + +def _transform_recursive( + data: object, + *, + annotation: type, + inner_type: type | None = None, +) -> object: + """Transform the given data against the expected type. + + Args: + annotation: The direct type annotation given to the particular piece of data. + This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc + + inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type + is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in + the list can be transformed using the metadata from the container type. + + Defaults to the same value as the `annotation` argument. + """ + if inner_type is None: + inner_type = annotation + + stripped_type = strip_annotated_type(inner_type) + origin = get_origin(stripped_type) or stripped_type + if is_typeddict(stripped_type) and is_mapping(data): + return _transform_typeddict(data, stripped_type) + + if origin == dict and is_mapping(data): + items_type = get_args(stripped_type)[1] + return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()} + + if ( + # List[T] + (is_list_type(stripped_type) and is_list(data)) + # Iterable[T] + or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + + inner_type = extract_type_arg(stripped_type, 0) + if _no_transform_needed(inner_type): + # for some types there is no need to transform anything, so we can get a small + # perf boost from skipping that work. + # + # but we still need to convert to a list to ensure the data is json-serializable + if is_list(data): + return data + return list(data) + + return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] + + if is_union_type(stripped_type): + # For union types we run the transformation against all subtypes to ensure that everything is transformed. + # + # TODO: there may be edge cases where the same normalized field name will transform to two different names + # in different subtypes. 
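+        # e.g. for `Union[TypedDictA, TypedDictB]` the same payload is passed
+        # through the transform for each subtype in turn; keys a subtype does
+        # not declare are left untouched, so the result reflects the union of
+        # both alias maps. (TypedDictA / TypedDictB are illustrative names.)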
+        for subtype in get_args(stripped_type):
+            data = _transform_recursive(data, annotation=annotation, inner_type=subtype)
+        return data
+
+    if isinstance(data, pydantic.BaseModel):
+        return model_dump(data, exclude_unset=True, mode="json")
+
+    annotated_type = _get_annotated_type(annotation)
+    if annotated_type is None:
+        return data
+
+    # ignore the first argument as it is the actual type
+    annotations = get_args(annotated_type)[1:]
+    for annotation in annotations:
+        if isinstance(annotation, PropertyInfo) and annotation.format is not None:
+            return _format_data(data, annotation.format, annotation.format_template)
+
+    return data
+
+
+def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
+    if isinstance(data, (date, datetime)):
+        if format_ == "iso8601":
+            return data.isoformat()
+
+        if format_ == "custom" and format_template is not None:
+            return data.strftime(format_template)
+
+    if format_ == "base64" and is_base64_file_input(data):
+        binary: str | bytes | None = None
+
+        if isinstance(data, pathlib.Path):
+            binary = data.read_bytes()
+        elif isinstance(data, io.IOBase):
+            binary = data.read()
+
+        if isinstance(binary, str):  # type: ignore[unreachable]
+            binary = binary.encode()
+
+        if not isinstance(binary, bytes):
+            raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
+
+        return base64.b64encode(binary).decode("ascii")
+
+    return data
+
+
+def _transform_typeddict(
+    data: Mapping[str, object],
+    expected_type: type,
+) -> Mapping[str, object]:
+    result: dict[str, object] = {}
+    annotations = get_type_hints(expected_type, include_extras=True)
+    for key, value in data.items():
+        if not is_given(value):
+            # we don't need to include `NotGiven` values here as they'll
+            # be stripped out before the request is sent anyway
+            continue
+
+        type_ = annotations.get(key)
+        if type_ is None:
+            # we do not have a type annotation for this field, leave it as is
+            result[key] = value
+        else:
+            result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_)
+    return result
+
+
+async def async_maybe_transform(
+    data: object,
+    expected_type: object,
+) -> Any | None:
+    """Wrapper over `async_transform()` that allows `None` to be passed.
+
+    See `async_transform()` for more details.
+    """
+    if data is None:
+        return None
+    return await async_transform(data, expected_type)
+
+
+async def async_transform(
+    data: _T,
+    expected_type: object,
+) -> _T:
+    """Transform dictionaries based off of type information from the given type, for example:
+
+    ```py
+    class Params(TypedDict, total=False):
+        card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]
+
+
+    transformed = await async_transform({"card_id": ""}, Params)
+    # {'cardID': ''}
+    ```
+
+    Any keys / data that does not have type information given will be included as is.
+
+    It should be noted that the transformations that this function does are not represented in the type system.
+    """
+    transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type))
+    return cast(_T, transformed)
+
+
+async def _async_transform_recursive(
+    data: object,
+    *,
+    annotation: type,
+    inner_type: type | None = None,
+) -> object:
+    """Transform the given data against the expected type.
+
+    Args:
+        annotation: The direct type annotation given to the particular piece of data.
+            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
+
+        inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
+            is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
+            the list can be transformed using the metadata from the container type.
+
+            Defaults to the same value as the `annotation` argument.
+    """
+    if inner_type is None:
+        inner_type = annotation
+
+    stripped_type = strip_annotated_type(inner_type)
+    origin = get_origin(stripped_type) or stripped_type
+    if is_typeddict(stripped_type) and is_mapping(data):
+        return await _async_transform_typeddict(data, stripped_type)
+
+    if origin == dict and is_mapping(data):
+        items_type = get_args(stripped_type)[1]
+        # note: await the async variant here so that nested values stay on the
+        # async path (e.g. base64 file formatting that reads via anyio)
+        return {key: await _async_transform_recursive(value, annotation=items_type) for key, value in data.items()}
+
+    if (
+        # List[T]
+        (is_list_type(stripped_type) and is_list(data))
+        # Iterable[T]
+        or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+    ):
+        # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
+        # intended as an iterable, so we don't transform it.
+        if isinstance(data, dict):
+            return cast(object, data)
+
+        inner_type = extract_type_arg(stripped_type, 0)
+        if _no_transform_needed(inner_type):
+            # for some types there is no need to transform anything, so we can get a small
+            # perf boost from skipping that work.
+            #
+            # but we still need to convert to a list to ensure the data is json-serializable
+            if is_list(data):
+                return data
+            return list(data)
+
+        return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
+
+    if is_union_type(stripped_type):
+        # For union types we run the transformation against all subtypes to ensure that everything is transformed.
+        #
+        # TODO: there may be edge cases where the same normalized field name will transform to two different names
+        # in different subtypes.
+ for subtype in get_args(stripped_type): + data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype) + return data + + if isinstance(data, pydantic.BaseModel): + return model_dump(data, exclude_unset=True, mode="json") + + annotated_type = _get_annotated_type(annotation) + if annotated_type is None: + return data + + # ignore the first argument as it is the actual type + annotations = get_args(annotated_type)[1:] + for annotation in annotations: + if isinstance(annotation, PropertyInfo) and annotation.format is not None: + return await _async_format_data(data, annotation.format, annotation.format_template) + + return data + + +async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: + if isinstance(data, (date, datetime)): + if format_ == "iso8601": + return data.isoformat() + + if format_ == "custom" and format_template is not None: + return data.strftime(format_template) + + if format_ == "base64" and is_base64_file_input(data): + binary: str | bytes | None = None + + if isinstance(data, pathlib.Path): + binary = await anyio.Path(data).read_bytes() + elif isinstance(data, io.IOBase): + binary = data.read() + + if isinstance(binary, str): # type: ignore[unreachable] + binary = binary.encode() + + if not isinstance(binary, bytes): + raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") + + return base64.b64encode(binary).decode("ascii") + + return data + + +async def _async_transform_typeddict( + data: Mapping[str, object], + expected_type: type, +) -> Mapping[str, object]: + result: dict[str, object] = {} + annotations = get_type_hints(expected_type, include_extras=True) + for key, value in data.items(): + if not is_given(value): + # we don't need to include `NotGiven` values here as they'll + # be stripped out before the request is sent anyway + continue + + type_ = annotations.get(key) + if type_ is None: + # we do not have a type annotation for this field, leave it as is + result[key] = value + else: + result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_) + return result + + +@lru_cache(maxsize=8096) +def get_type_hints( + obj: Any, + globalns: dict[str, Any] | None = None, + localns: Mapping[str, Any] | None = None, + include_extras: bool = False, +) -> dict[str, Any]: + return _get_type_hints(obj, globalns=globalns, localns=localns, include_extras=include_extras) diff --git a/src/digitalocean_genai_sdk/_utils/_typing.py b/src/digitalocean_genai_sdk/_utils/_typing.py new file mode 100644 index 00000000..1bac9542 --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/_typing.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +import sys +import typing +import typing_extensions +from typing import Any, TypeVar, Iterable, cast +from collections import abc as _c_abc +from typing_extensions import ( + TypeIs, + Required, + Annotated, + get_args, + get_origin, +) + +from ._utils import lru_cache +from .._types import InheritsGeneric +from .._compat import is_union as _is_union + + +def is_annotated_type(typ: type) -> bool: + return get_origin(typ) == Annotated + + +def is_list_type(typ: type) -> bool: + return (get_origin(typ) or typ) == list + + +def is_iterable_type(typ: type) -> bool: + """If the given type is `typing.Iterable[T]`""" + origin = get_origin(typ) or typ + return origin == Iterable or origin == _c_abc.Iterable + + +def is_union_type(typ: type) -> bool: + return _is_union(get_origin(typ)) + + +def 
is_required_type(typ: type) -> bool: + return get_origin(typ) == Required + + +def is_typevar(typ: type) -> bool: + # type ignore is required because type checkers + # think this expression will always return False + return type(typ) == TypeVar # type: ignore + + +_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,) +if sys.version_info >= (3, 12): + _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) + + +def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]: + """Return whether the provided argument is an instance of `TypeAliasType`. + + ```python + type Int = int + is_type_alias_type(Int) + # > True + Str = TypeAliasType("Str", str) + is_type_alias_type(Str) + # > True + ``` + """ + return isinstance(tp, _TYPE_ALIAS_TYPES) + + +# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]] +@lru_cache(maxsize=8096) +def strip_annotated_type(typ: type) -> type: + if is_required_type(typ) or is_annotated_type(typ): + return strip_annotated_type(cast(type, get_args(typ)[0])) + + return typ + + +def extract_type_arg(typ: type, index: int) -> type: + args = get_args(typ) + try: + return cast(type, args[index]) + except IndexError as err: + raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err + + +def extract_type_var_from_base( + typ: type, + *, + generic_bases: tuple[type, ...], + index: int, + failure_message: str | None = None, +) -> type: + """Given a type like `Foo[T]`, returns the generic type variable `T`. + + This also handles the case where a concrete subclass is given, e.g. + ```py + class MyResponse(Foo[bytes]): + ... + + extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes + ``` + + And where a generic subclass is given: + ```py + _T = TypeVar('_T') + class MyResponse(Foo[_T]): + ... + + extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes + ``` + """ + cls = cast(object, get_origin(typ) or typ) + if cls in generic_bases: # pyright: ignore[reportUnnecessaryContains] + # we're given the class directly + return extract_type_arg(typ, index) + + # if a subclass is given + # --- + # this is needed as __orig_bases__ is not present in the typeshed stubs + # because it is intended to be for internal use only, however there does + # not seem to be a way to resolve generic TypeVars for inherited subclasses + # without using it. + if isinstance(cls, InheritsGeneric): + target_base_class: Any | None = None + for base in cls.__orig_bases__: + if base.__origin__ in generic_bases: + target_base_class = base + break + + if target_base_class is None: + raise RuntimeError( + "Could not find the generic base class;\n" + "This should never happen;\n" + f"Does {cls} inherit from one of {generic_bases} ?" + ) + + extracted = extract_type_arg(target_base_class, index) + if is_typevar(extracted): + # If the extracted type argument is itself a type variable + # then that means the subclass itself is generic, so we have + # to resolve the type argument from the class itself, not + # the base class. + # + # Note: if there is more than 1 type argument, the subclass could + # change the ordering of the type arguments, this is not currently + # supported. 
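+            # e.g. given `class MyResponse(Foo[_T])` and `typ=MyResponse[bytes]`,
+            # the matched base is `Foo[_T]` whose argument is the TypeVar `_T`,
+            # so we re-extract from the subscripted `MyResponse[bytes]` itself
+            # to resolve `bytes`.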
+ return extract_type_arg(typ, index) + + return extracted + + raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}") diff --git a/src/digitalocean_genai_sdk/_utils/_utils.py b/src/digitalocean_genai_sdk/_utils/_utils.py new file mode 100644 index 00000000..ea3cf3f2 --- /dev/null +++ b/src/digitalocean_genai_sdk/_utils/_utils.py @@ -0,0 +1,422 @@ +from __future__ import annotations + +import os +import re +import inspect +import functools +from typing import ( + Any, + Tuple, + Mapping, + TypeVar, + Callable, + Iterable, + Sequence, + cast, + overload, +) +from pathlib import Path +from datetime import date, datetime +from typing_extensions import TypeGuard + +import sniffio + +from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike +from .._compat import parse_date as parse_date, parse_datetime as parse_datetime + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) +_MappingT = TypeVar("_MappingT", bound=Mapping[str, object]) +_SequenceT = TypeVar("_SequenceT", bound=Sequence[object]) +CallableT = TypeVar("CallableT", bound=Callable[..., Any]) + + +def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: + return [item for sublist in t for item in sublist] + + +def extract_files( + # TODO: this needs to take Dict but variance issues..... + # create protocol type ? + query: Mapping[str, object], + *, + paths: Sequence[Sequence[str]], +) -> list[tuple[str, FileTypes]]: + """Recursively extract files from the given dictionary based on specified paths. + + A path may look like this ['foo', 'files', '', 'data']. + + Note: this mutates the given dictionary. + """ + files: list[tuple[str, FileTypes]] = [] + for path in paths: + files.extend(_extract_items(query, path, index=0, flattened_key=None)) + return files + + +def _extract_items( + obj: object, + path: Sequence[str], + *, + index: int, + flattened_key: str | None, +) -> list[tuple[str, FileTypes]]: + try: + key = path[index] + except IndexError: + if isinstance(obj, NotGiven): + # no value was provided - we can safely ignore + return [] + + # cyclical import + from .._files import assert_is_file_content + + # We have exhausted the path, return the entry we found. + assert flattened_key is not None + + if is_list(obj): + files: list[tuple[str, FileTypes]] = [] + for entry in obj: + assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "") + files.append((flattened_key + "[]", cast(FileTypes, entry))) + return files + + assert_is_file_content(obj, key=flattened_key) + return [(flattened_key, cast(FileTypes, obj))] + + index += 1 + if is_dict(obj): + try: + # We are at the last entry in the path so we must remove the field + if (len(path)) == index: + item = obj.pop(key) + else: + item = obj[key] + except KeyError: + # Key was not present in the dictionary, this is not indicative of an error + # as the given path may not point to a required field. We also do not want + # to enforce required fields as the API may differ from the spec in some cases. + return [] + if flattened_key is None: + flattened_key = key + else: + flattened_key += f"[{key}]" + return _extract_items( + item, + path, + index=index, + flattened_key=flattened_key, + ) + elif is_list(obj): + if key != "": + return [] + + return flatten( + [ + _extract_items( + item, + path, + index=index, + flattened_key=flattened_key + "[]" if flattened_key is not None else "[]", + ) + for item in obj + ] + ) + + # Something unexpected was passed, just ignore it. 
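+    # e.g. a string or int where the path expects another mapping/list:
+    # there is nothing file-like to extract here, so report no files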
+    return []
+
+
+def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
+    return not isinstance(obj, NotGiven)
+
+
+# Type safe methods for narrowing types with TypeVars.
+# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
+# however this causes Pyright to rightfully report errors. As we know we don't
+# care about the contained types we can safely use `object` in its place.
+#
+# There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
+# `is_*` is for when you're dealing with an unknown input
+# `is_*_t` is for when you're narrowing a known union type to a specific subset
+
+
+def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]:
+    return isinstance(obj, tuple)
+
+
+def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]:
+    return isinstance(obj, tuple)
+
+
+def is_sequence(obj: object) -> TypeGuard[Sequence[object]]:
+    return isinstance(obj, Sequence)
+
+
+def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]:
+    return isinstance(obj, Sequence)
+
+
+def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:
+    return isinstance(obj, Mapping)
+
+
+def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]:
+    return isinstance(obj, Mapping)
+
+
+def is_dict(obj: object) -> TypeGuard[dict[object, object]]:
+    return isinstance(obj, dict)
+
+
+def is_list(obj: object) -> TypeGuard[list[object]]:
+    return isinstance(obj, list)
+
+
+def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
+    return isinstance(obj, Iterable)
+
+
+def deepcopy_minimal(item: _T) -> _T:
+    """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
+
+    - mappings, e.g. `dict`
+    - list
+
+    This is done for performance reasons.
+    """
+    if is_mapping(item):
+        return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
+    if is_list(item):
+        return cast(_T, [deepcopy_minimal(entry) for entry in item])
+    return item
+
+
+# copied from https://github.com/Rapptz/RoboDanny
+def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
+    size = len(seq)
+    if size == 0:
+        return ""
+
+    if size == 1:
+        return seq[0]
+
+    if size == 2:
+        return f"{seq[0]} {final} {seq[1]}"
+
+    return delim.join(seq[:-1]) + f" {final} {seq[-1]}"
+
+
+def quote(string: str) -> str:
+    """Add single quotation marks around the given string. Does *not* do any escaping."""
+    return f"'{string}'"
+
+
+def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
+    """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function.
+
+    Useful for enforcing runtime validation of overloaded functions.
+
+    Example usage:
+    ```py
+    @overload
+    def foo(*, a: str) -> str: ...
+
+
+    @overload
+    def foo(*, b: bool) -> str: ...
+
+
+    # This enforces the same constraints that a static type checker would
+    # i.e. that either a or b must be passed to the function
+    @required_args(["a"], ["b"])
+    def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
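+    # foo(a="x") and foo(b=True) pass the check; foo() raises:
+    #   TypeError: Missing required arguments; Expected either ('a') or ('b')
+    #   arguments to be given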
+ ``` + """ + + def inner(func: CallableT) -> CallableT: + params = inspect.signature(func).parameters + positional = [ + name + for name, param in params.items() + if param.kind + in { + param.POSITIONAL_ONLY, + param.POSITIONAL_OR_KEYWORD, + } + ] + + @functools.wraps(func) + def wrapper(*args: object, **kwargs: object) -> object: + given_params: set[str] = set() + for i, _ in enumerate(args): + try: + given_params.add(positional[i]) + except IndexError: + raise TypeError( + f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given" + ) from None + + for key in kwargs.keys(): + given_params.add(key) + + for variant in variants: + matches = all((param in given_params for param in variant)) + if matches: + break + else: # no break + if len(variants) > 1: + variations = human_join( + ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants] + ) + msg = f"Missing required arguments; Expected either {variations} arguments to be given" + else: + assert len(variants) > 0 + + # TODO: this error message is not deterministic + missing = list(set(variants[0]) - given_params) + if len(missing) > 1: + msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}" + else: + msg = f"Missing required argument: {quote(missing[0])}" + raise TypeError(msg) + return func(*args, **kwargs) + + return wrapper # type: ignore + + return inner + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +@overload +def strip_not_given(obj: None) -> None: ... + + +@overload +def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... + + +@overload +def strip_not_given(obj: object) -> object: ... + + +def strip_not_given(obj: object | None) -> object: + """Remove all top-level keys where their values are instances of `NotGiven`""" + if obj is None: + return None + + if not is_mapping(obj): + return obj + + return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} + + +def coerce_integer(val: str) -> int: + return int(val, base=10) + + +def coerce_float(val: str) -> float: + return float(val) + + +def coerce_boolean(val: str) -> bool: + return val == "true" or val == "1" or val == "on" + + +def maybe_coerce_integer(val: str | None) -> int | None: + if val is None: + return None + return coerce_integer(val) + + +def maybe_coerce_float(val: str | None) -> float | None: + if val is None: + return None + return coerce_float(val) + + +def maybe_coerce_boolean(val: str | None) -> bool | None: + if val is None: + return None + return coerce_boolean(val) + + +def removeprefix(string: str, prefix: str) -> str: + """Remove a prefix from a string. + + Backport of `str.removeprefix` for Python < 3.9 + """ + if string.startswith(prefix): + return string[len(prefix) :] + return string + + +def removesuffix(string: str, suffix: str) -> str: + """Remove a suffix from a string. 
+ + Backport of `str.removesuffix` for Python < 3.9 + """ + if string.endswith(suffix): + return string[: -len(suffix)] + return string + + +def file_from_path(path: str) -> FileTypes: + contents = Path(path).read_bytes() + file_name = os.path.basename(path) + return (file_name, contents) + + +def get_required_header(headers: HeadersLike, header: str) -> str: + lower_header = header.lower() + if is_mapping_t(headers): + # mypy doesn't understand the type narrowing here + for k, v in headers.items(): # type: ignore + if k.lower() == lower_header and isinstance(v, str): + return v + + # to deal with the case where the header looks like Stainless-Event-Id + intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) + + for normalized_header in [header, lower_header, header.upper(), intercaps_header]: + value = headers.get(normalized_header) + if value: + return value + + raise ValueError(f"Could not find {header} header") + + +def get_async_library() -> str: + try: + return sniffio.current_async_library() + except Exception: + return "false" + + +def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]: + """A version of functools.lru_cache that retains the type signature + for the wrapped function arguments. + """ + wrapper = functools.lru_cache( # noqa: TID251 + maxsize=maxsize, + ) + return cast(Any, wrapper) # type: ignore[no-any-return] + + +def json_safe(data: object) -> object: + """Translates a mapping / sequence recursively in the same fashion + as `pydantic` v2's `model_dump(mode="json")`. + """ + if is_mapping(data): + return {json_safe(key): json_safe(value) for key, value in data.items()} + + if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)): + return [json_safe(item) for item in data] + + if isinstance(data, (datetime, date)): + return data.isoformat() + + return data diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py new file mode 100644 index 00000000..5c4fa53a --- /dev/null +++ b/src/digitalocean_genai_sdk/_version.py @@ -0,0 +1,4 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +__title__ = "digitalocean_genai_sdk" +__version__ = "0.0.1-alpha.0" diff --git a/src/digitalocean_genai_sdk/lib/.keep b/src/digitalocean_genai_sdk/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/digitalocean_genai_sdk/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/digitalocean_genai_sdk/py.typed b/src/digitalocean_genai_sdk/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py new file mode 100644 index 00000000..237b0ca7 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/__init__.py @@ -0,0 +1,243 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .audio import ( + AudioResource, + AsyncAudioResource, + AudioResourceWithRawResponse, + AsyncAudioResourceWithRawResponse, + AudioResourceWithStreamingResponse, + AsyncAudioResourceWithStreamingResponse, +) +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .batches import ( + BatchesResource, + AsyncBatchesResource, + BatchesResourceWithRawResponse, + AsyncBatchesResourceWithRawResponse, + BatchesResourceWithStreamingResponse, + AsyncBatchesResourceWithStreamingResponse, +) +from .threads import ( + ThreadsResource, + AsyncThreadsResource, + ThreadsResourceWithRawResponse, + AsyncThreadsResourceWithRawResponse, + ThreadsResourceWithStreamingResponse, + AsyncThreadsResourceWithStreamingResponse, +) +from .uploads import ( + UploadsResource, + AsyncUploadsResource, + UploadsResourceWithRawResponse, + AsyncUploadsResourceWithRawResponse, + UploadsResourceWithStreamingResponse, + AsyncUploadsResourceWithStreamingResponse, +) +from .realtime import ( + RealtimeResource, + AsyncRealtimeResource, + RealtimeResourceWithRawResponse, + AsyncRealtimeResourceWithRawResponse, + RealtimeResourceWithStreamingResponse, + AsyncRealtimeResourceWithStreamingResponse, +) +from .responses import ( + ResponsesResource, + AsyncResponsesResource, + ResponsesResourceWithRawResponse, + AsyncResponsesResourceWithRawResponse, + ResponsesResourceWithStreamingResponse, + AsyncResponsesResourceWithStreamingResponse, +) +from .assistants import ( + AssistantsResource, + AsyncAssistantsResource, + AssistantsResourceWithRawResponse, + AsyncAssistantsResourceWithRawResponse, + AssistantsResourceWithStreamingResponse, + AsyncAssistantsResourceWithStreamingResponse, +) +from .embeddings import ( + EmbeddingsResource, + AsyncEmbeddingsResource, + EmbeddingsResourceWithRawResponse, + AsyncEmbeddingsResourceWithRawResponse, + EmbeddingsResourceWithStreamingResponse, + AsyncEmbeddingsResourceWithStreamingResponse, +) +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) +from .fine_tuning import ( + FineTuningResource, + AsyncFineTuningResource, + FineTuningResourceWithRawResponse, + AsyncFineTuningResourceWithRawResponse, + FineTuningResourceWithStreamingResponse, + AsyncFineTuningResourceWithStreamingResponse, +) +from .moderations import ( + ModerationsResource, + AsyncModerationsResource, + ModerationsResourceWithRawResponse, + AsyncModerationsResourceWithRawResponse, + ModerationsResourceWithStreamingResponse, + AsyncModerationsResourceWithStreamingResponse, +) +from .organization import ( + OrganizationResource, + 
AsyncOrganizationResource, + OrganizationResourceWithRawResponse, + AsyncOrganizationResourceWithRawResponse, + OrganizationResourceWithStreamingResponse, + AsyncOrganizationResourceWithStreamingResponse, +) +from .vector_stores import ( + VectorStoresResource, + AsyncVectorStoresResource, + VectorStoresResourceWithRawResponse, + AsyncVectorStoresResourceWithRawResponse, + VectorStoresResourceWithStreamingResponse, + AsyncVectorStoresResourceWithStreamingResponse, +) + +__all__ = [ + "AssistantsResource", + "AsyncAssistantsResource", + "AssistantsResourceWithRawResponse", + "AsyncAssistantsResourceWithRawResponse", + "AssistantsResourceWithStreamingResponse", + "AsyncAssistantsResourceWithStreamingResponse", + "AudioResource", + "AsyncAudioResource", + "AudioResourceWithRawResponse", + "AsyncAudioResourceWithRawResponse", + "AudioResourceWithStreamingResponse", + "AsyncAudioResourceWithStreamingResponse", + "BatchesResource", + "AsyncBatchesResource", + "BatchesResourceWithRawResponse", + "AsyncBatchesResourceWithRawResponse", + "BatchesResourceWithStreamingResponse", + "AsyncBatchesResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", + "CompletionsResource", + "AsyncCompletionsResource", + "CompletionsResourceWithRawResponse", + "AsyncCompletionsResourceWithRawResponse", + "CompletionsResourceWithStreamingResponse", + "AsyncCompletionsResourceWithStreamingResponse", + "EmbeddingsResource", + "AsyncEmbeddingsResource", + "EmbeddingsResourceWithRawResponse", + "AsyncEmbeddingsResourceWithRawResponse", + "EmbeddingsResourceWithStreamingResponse", + "AsyncEmbeddingsResourceWithStreamingResponse", + "FilesResource", + "AsyncFilesResource", + "FilesResourceWithRawResponse", + "AsyncFilesResourceWithRawResponse", + "FilesResourceWithStreamingResponse", + "AsyncFilesResourceWithStreamingResponse", + "FineTuningResource", + "AsyncFineTuningResource", + "FineTuningResourceWithRawResponse", + "AsyncFineTuningResourceWithRawResponse", + "FineTuningResourceWithStreamingResponse", + "AsyncFineTuningResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", + "ModerationsResource", + "AsyncModerationsResource", + "ModerationsResourceWithRawResponse", + "AsyncModerationsResourceWithRawResponse", + "ModerationsResourceWithStreamingResponse", + "AsyncModerationsResourceWithStreamingResponse", + "OrganizationResource", + "AsyncOrganizationResource", + "OrganizationResourceWithRawResponse", + "AsyncOrganizationResourceWithRawResponse", + "OrganizationResourceWithStreamingResponse", + "AsyncOrganizationResourceWithStreamingResponse", + "RealtimeResource", + "AsyncRealtimeResource", + "RealtimeResourceWithRawResponse", + "AsyncRealtimeResourceWithRawResponse", + "RealtimeResourceWithStreamingResponse", + "AsyncRealtimeResourceWithStreamingResponse", + "ResponsesResource", + "AsyncResponsesResource", + "ResponsesResourceWithRawResponse", + "AsyncResponsesResourceWithRawResponse", + "ResponsesResourceWithStreamingResponse", + 
"AsyncResponsesResourceWithStreamingResponse", + "ThreadsResource", + "AsyncThreadsResource", + "ThreadsResourceWithRawResponse", + "AsyncThreadsResourceWithRawResponse", + "ThreadsResourceWithStreamingResponse", + "AsyncThreadsResourceWithStreamingResponse", + "UploadsResource", + "AsyncUploadsResource", + "UploadsResourceWithRawResponse", + "AsyncUploadsResourceWithRawResponse", + "UploadsResourceWithStreamingResponse", + "AsyncUploadsResourceWithStreamingResponse", + "VectorStoresResource", + "AsyncVectorStoresResource", + "VectorStoresResourceWithRawResponse", + "AsyncVectorStoresResourceWithRawResponse", + "VectorStoresResourceWithStreamingResponse", + "AsyncVectorStoresResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/assistants.py b/src/digitalocean_genai_sdk/resources/assistants.py new file mode 100644 index 00000000..c6ae36f5 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/assistants.py @@ -0,0 +1,910 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ..types import ( + ReasoningEffort, + assistant_list_params, + assistant_create_params, + assistant_update_params, +) +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.assistant_object import AssistantObject +from ..types.reasoning_effort import ReasoningEffort +from ..types.assistant_list_response import AssistantListResponse +from ..types.assistant_delete_response import AssistantDeleteResponse +from ..types.assistant_supported_models import AssistantSupportedModels +from ..types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam + +__all__ = ["AssistantsResource", "AsyncAssistantsResource"] + + +class AssistantsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AssistantsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AssistantsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AssistantsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AssistantsResourceWithStreamingResponse(self) + + def create( + self, + *, + model: Union[str, AssistantSupportedModels], + description: Optional[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """ + Create an assistant with a model and instructions. + + Args: + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + description: The description of the assistant. The maximum length is 512 characters. + + instructions: The system instructions that the assistant uses. The maximum length is 256,000 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the assistant. The maximum length is 256 characters. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_resources: A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. For example, the `code_interpreter` tool requires + a list of file IDs, while the `file_search` tool requires a list of vector store + IDs. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `file_search`, or + `function`. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/assistants", + body=maybe_transform( + { + "model": model, + "description": description, + "instructions": instructions, + "metadata": metadata, + "name": name, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + }, + assistant_create_params.AssistantCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + def retrieve( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """ + Retrieves an assistant. 
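+
+        For example (illustrative; assumes a configured client as shown in the
+        README): `client.assistants.retrieve("asst_abc123")`.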
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return self._get( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + def update( + self, + assistant_id: str, + *, + description: Optional[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """Modifies an assistant. + + Args: + description: The description of the assistant. + + The maximum length is 512 characters. + + instructions: The system instructions that the assistant uses. The maximum length is 256,000 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + name: The name of the assistant. The maximum length is 256 characters. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). 
+ + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_resources: A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. For example, the `code_interpreter` tool requires + a list of file IDs, while the `file_search` tool requires a list of vector store + IDs. + + tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `file_search`, or + `function`. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return self._post( + f"/assistants/{assistant_id}", + body=maybe_transform( + { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + }, + assistant_update_params.AssistantUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantListResponse: + """Returns a list of assistants. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. 
+ + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/assistants", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + assistant_list_params.AssistantListParams, + ), + ), + cast_to=AssistantListResponse, + ) + + def delete( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantDeleteResponse: + """ + Delete an assistant. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return self._delete( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantDeleteResponse, + ) + + +class AsyncAssistantsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAssistantsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAssistantsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAssistantsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAssistantsResourceWithStreamingResponse(self) + + async def create( + self, + *, + model: Union[str, AssistantSupportedModels], + description: Optional[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """ + Create an assistant with a model and instructions. + + Args: + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + description: The description of the assistant. The maximum length is 512 characters. + + instructions: The system instructions that the assistant uses. The maximum length is 256,000 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the assistant. The maximum length is 256 characters. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_resources: A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. For example, the `code_interpreter` tool requires + a list of file IDs, while the `file_search` tool requires a list of vector store + IDs. + + tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `file_search`, or + `function`. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/assistants", + body=await async_maybe_transform( + { + "model": model, + "description": description, + "instructions": instructions, + "metadata": metadata, + "name": name, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + }, + assistant_create_params.AssistantCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + async def retrieve( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """ + Retrieves an assistant.
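The JSON-mode caveat in the create docstring above is easiest to see in a concrete call. A minimal sketch, assuming `client` is an instance of the async client and using an illustrative model ID:

assistant = await client.assistants.create(
    model="gpt-4o",  # illustrative model ID, not taken from this diff
    name="Math tutor",
    # JSON mode requires explicitly instructing the model to emit JSON:
    instructions="You are a math tutor. Always reply with a single JSON object.",
    response_format={"type": "json_object"},
)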
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return await self._get( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + async def update( + self, + assistant_id: str, + *, + description: Optional[str] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantObject: + """Modifies an assistant. + + Args: + description: The description of the assistant. + + The maximum length is 512 characters. + + instructions: The system instructions that the assistant uses. The maximum length is 256,000 + characters. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + name: The name of the assistant. The maximum length is 256 characters. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). 
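For the `json_schema` variant referenced above, the option is a nested object wrapping a standard JSON Schema document. Roughly, as a sketch of the shape (the schema name and fields are illustrative):

response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "assistant_reply",  # illustrative schema name
        "schema": {
            "type": "object",
            "properties": {"answer": {"type": "string"}},
            "required": ["answer"],
        },
    },
}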
+ + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_resources: A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. For example, the `code_interpreter` tool requires + a list of file IDs, while the `file_search` tool requires a list of vector store + IDs. + + tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per + assistant. Tools can be of types `code_interpreter`, `file_search`, or + `function`. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return await self._post( + f"/assistants/{assistant_id}", + body=await async_maybe_transform( + { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + }, + assistant_update_params.AssistantUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantObject, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantListResponse: + """Returns a list of assistants. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list.
For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/assistants", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + assistant_list_params.AssistantListParams, + ), + ), + cast_to=AssistantListResponse, + ) + + async def delete( + self, + assistant_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AssistantDeleteResponse: + """ + Delete an assistant. 
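The `after` cursor documented above composes into a simple pagination loop. A sketch, assuming `AssistantListResponse` exposes the usual `data` and `has_more` fields and that each object carries an `id`:

kwargs = {}
while True:
    page = await client.assistants.list(limit=100, **kwargs)
    for assistant in page.data:      # `data` assumed on the response model
        print(assistant.id)
    if not page.has_more:            # `has_more` assumed on the response model
        break
    kwargs = {"after": page.data[-1].id}  # resume from the last object seen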
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not assistant_id: + raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") + return await self._delete( + f"/assistants/{assistant_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AssistantDeleteResponse, + ) + + +class AssistantsResourceWithRawResponse: + def __init__(self, assistants: AssistantsResource) -> None: + self._assistants = assistants + + self.create = to_raw_response_wrapper( + assistants.create, + ) + self.retrieve = to_raw_response_wrapper( + assistants.retrieve, + ) + self.update = to_raw_response_wrapper( + assistants.update, + ) + self.list = to_raw_response_wrapper( + assistants.list, + ) + self.delete = to_raw_response_wrapper( + assistants.delete, + ) + + +class AsyncAssistantsResourceWithRawResponse: + def __init__(self, assistants: AsyncAssistantsResource) -> None: + self._assistants = assistants + + self.create = async_to_raw_response_wrapper( + assistants.create, + ) + self.retrieve = async_to_raw_response_wrapper( + assistants.retrieve, + ) + self.update = async_to_raw_response_wrapper( + assistants.update, + ) + self.list = async_to_raw_response_wrapper( + assistants.list, + ) + self.delete = async_to_raw_response_wrapper( + assistants.delete, + ) + + +class AssistantsResourceWithStreamingResponse: + def __init__(self, assistants: AssistantsResource) -> None: + self._assistants = assistants + + self.create = to_streamed_response_wrapper( + assistants.create, + ) + self.retrieve = to_streamed_response_wrapper( + assistants.retrieve, + ) + self.update = to_streamed_response_wrapper( + assistants.update, + ) + self.list = to_streamed_response_wrapper( + assistants.list, + ) + self.delete = to_streamed_response_wrapper( + assistants.delete, + ) + + +class AsyncAssistantsResourceWithStreamingResponse: + def __init__(self, assistants: AsyncAssistantsResource) -> None: + self._assistants = assistants + + self.create = async_to_streamed_response_wrapper( + assistants.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + assistants.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + assistants.update, + ) + self.list = async_to_streamed_response_wrapper( + assistants.list, + ) + self.delete = async_to_streamed_response_wrapper( + assistants.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/audio.py b/src/digitalocean_genai_sdk/resources/audio.py new file mode 100644 index 00000000..7cecbe6d --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/audio.py @@ -0,0 +1,650 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
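This new file defines the audio endpoints. Note that `generate_speech` below returns a binary response rather than a parsed model; a usage sketch (the `write_to_file` helper and the model/voice IDs are assumptions, not confirmed by this diff):

speech = client.audio.generate_speech(
    input="Hello from the GenAI SDK.",
    model="tts-1",
    voice="alloy",  # illustrative voice ID
    response_format="mp3",
)
speech.write_to_file("hello.mp3")  # assumed BinaryAPIResponse helper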
+ +from __future__ import annotations + +from typing import Any, List, Union, Mapping, Optional, cast +from typing_extensions import Literal + +import httpx + +from ..types import ( + audio_generate_speech_params, + audio_translate_audio_params, + audio_transcribe_audio_params, +) +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + BinaryAPIResponse, + AsyncBinaryAPIResponse, + StreamedBinaryAPIResponse, + AsyncStreamedBinaryAPIResponse, + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + to_custom_raw_response_wrapper, + async_to_streamed_response_wrapper, + to_custom_streamed_response_wrapper, + async_to_custom_raw_response_wrapper, + async_to_custom_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.voice_ids_shared_param import VoiceIDsSharedParam +from ..types.audio_translate_audio_response import AudioTranslateAudioResponse +from ..types.audio_transcribe_audio_response import AudioTranscribeAudioResponse + +__all__ = ["AudioResource", "AsyncAudioResource"] + + +class AudioResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AudioResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AudioResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AudioResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AudioResourceWithStreamingResponse(self) + + def generate_speech( + self, + *, + input: str, + model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]], + voice: VoiceIDsSharedParam, + instructions: str | NotGiven = NOT_GIVEN, + response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, + speed: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BinaryAPIResponse: + """ + Generates audio from the input text. + + Args: + input: The text to generate audio for. The maximum length is 4096 characters. + + model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or + `gpt-4o-mini-tts`. + + voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + `verse`. Previews of the voices are available in the + [Text to speech guide](/docs/guides/text-to-speech#voice-options). + + instructions: Control the voice of your generated audio with additional instructions. Does not + work with `tts-1` or `tts-1-hd`. 
+ + response_format: The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + `wav`, and `pcm`. + + speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + the default. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} + return self._post( + "/audio/speech", + body=maybe_transform( + { + "input": input, + "model": model, + "voice": voice, + "instructions": instructions, + "response_format": response_format, + "speed": speed, + }, + audio_generate_speech_params.AudioGenerateSpeechParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BinaryAPIResponse, + ) + + def transcribe_audio( + self, + *, + file: FileTypes, + model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]], + include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN, + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AudioTranscribeAudioResponse: + """ + Transcribes audio into the input language. + + Args: + file: + The audio file object (not file name) to transcribe, in one of these formats: + flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + + model: ID of the model to use. The options are `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + Whisper V2 model). + + include: Additional information to include in the transcription response. `logprobs` will + return the log probabilities of the tokens in the response to understand the + model's confidence in the transcription. `logprobs` only works with + response_format set to `json` and only with the models `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`. + + language: The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + + prompt: An optional text to guide the model's style or continue a previous audio + segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the + audio language. + + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + the only supported format is `json`.
+ + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + for more information. + + Note: Streaming is not supported for the `whisper-1` model and will be ignored. + + temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + output more random, while lower values like 0.2 will make it more focused and + deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + + timestamp_granularities: The timestamp granularities to populate for this transcription. + `response_format` must be set to `verbose_json` to use timestamp granularities. + Either or both of these options are supported: `word` or `segment`. Note: There + is no additional latency for segment timestamps, but generating word timestamps + incurs additional latency. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "model": model, + "include": include, + "language": language, + "prompt": prompt, + "response_format": response_format, + "stream": stream, + "temperature": temperature, + "timestamp_granularities": timestamp_granularities, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return cast( + AudioTranscribeAudioResponse, + self._post( + "/audio/transcriptions", + body=maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, AudioTranscribeAudioResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + def translate_audio( + self, + *, + file: FileTypes, + model: Union[str, Literal["whisper-1"]], + prompt: str | NotGiven = NOT_GIVEN, + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AudioTranslateAudioResponse: + """ + Translates audio into English. + + Args: + file: The audio file object (not file name) to translate, in one of these formats: flac, + mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + + model: ID of the model to use.
Only `whisper-1` (which is powered by our open source + Whisper V2 model) is currently available. + + prompt: An optional text to guide the model's style or continue a previous audio + segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in + English. + + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. + + temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + output more random, while lower values like 0.2 will make it more focused and + deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "model": model, + "prompt": prompt, + "response_format": response_format, + "temperature": temperature, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return cast( + AudioTranslateAudioResponse, + self._post( + "/audio/translations", + body=maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, AudioTranslateAudioResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + +class AsyncAudioResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAudioResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAudioResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAudioResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAudioResourceWithStreamingResponse(self) + + async def generate_speech( + self, + *, + input: str, + model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]], + voice: VoiceIDsSharedParam, + instructions: str | NotGiven = NOT_GIVEN, + response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, + speed: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncBinaryAPIResponse: + """ + Generates audio from the input text. + + Args: + input: The text to generate audio for. The maximum length is 4096 characters. + + model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or + `gpt-4o-mini-tts`. + + voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + `verse`. Previews of the voices are available in the + [Text to speech guide](/docs/guides/text-to-speech#voice-options). + + instructions: Control the voice of your generated audio with additional instructions. Does not + work with `tts-1` or `tts-1-hd`. + + response_format: The format to generate audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + `wav`, and `pcm`. + + speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + the default. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} + return await self._post( + "/audio/speech", + body=await async_maybe_transform( + { + "input": input, + "model": model, + "voice": voice, + "instructions": instructions, + "response_format": response_format, + "speed": speed, + }, + audio_generate_speech_params.AudioGenerateSpeechParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AsyncBinaryAPIResponse, + ) + + async def transcribe_audio( + self, + *, + file: FileTypes, + model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]], + include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN, + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AudioTranscribeAudioResponse: + """ + Transcribes audio into the input language. + + Args: + file: + The audio file object (not file name) to transcribe, in one of these formats: + flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + + model: ID of the model to use. The options are `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + Whisper V2 model). + + include: Additional information to include in the transcription response. `logprobs` will + return the log probabilities of the tokens in the response to understand the + model's confidence in the transcription.
`logprobs` only works with + response_format set to `json` and only with the models `gpt-4o-transcribe` and + `gpt-4o-mini-transcribe`. + + language: The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + + prompt: An optional text to guide the model's style or continue a previous audio + segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the + audio language. + + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + the only supported format is `json`. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the + [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + for more information. + + Note: Streaming is not supported for the `whisper-1` model and will be ignored. + + temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + output more random, while lower values like 0.2 will make it more focused and + deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + + timestamp_granularities: The timestamp granularities to populate for this transcription. + `response_format` must be set to `verbose_json` to use timestamp granularities. + Either or both of these options are supported: `word` or `segment`. Note: There + is no additional latency for segment timestamps, but generating word timestamps + incurs additional latency. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "model": model, + "include": include, + "language": language, + "prompt": prompt, + "response_format": response_format, + "stream": stream, + "temperature": temperature, + "timestamp_granularities": timestamp_granularities, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return cast( + AudioTranscribeAudioResponse, + await self._post( + "/audio/transcriptions", + body=await async_maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, AudioTranscribeAudioResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + async def translate_audio( + self, + *, + file: FileTypes, + model: Union[str, Literal["whisper-1"]], + prompt: str | NotGiven = NOT_GIVEN, + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AudioTranslateAudioResponse: + """ + Translates audio into English. + + Args: + file: The audio file object (not file name) to translate, in one of these formats: flac, + mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + + model: ID of the model to use. Only `whisper-1` (which is powered by our open source + Whisper V2 model) is currently available. + + prompt: An optional text to guide the model's style or continue a previous audio + segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in + English. + + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. + + temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + output more random, while lower values like 0.2 will make it more focused and + deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "model": model, + "prompt": prompt, + "response_format": response_format, + "temperature": temperature, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return cast( + AudioTranslateAudioResponse, + await self._post( + "/audio/translations", + body=await async_maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, AudioTranslateAudioResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + +class AudioResourceWithRawResponse: + def __init__(self, audio: AudioResource) -> None: + self._audio = audio + + self.generate_speech = to_custom_raw_response_wrapper( + audio.generate_speech, + BinaryAPIResponse, + ) + self.transcribe_audio = to_raw_response_wrapper( + audio.transcribe_audio, + ) + self.translate_audio = to_raw_response_wrapper( + audio.translate_audio, + ) + + +class AsyncAudioResourceWithRawResponse: + def __init__(self, audio: AsyncAudioResource) -> None: + self._audio = audio + + self.generate_speech = async_to_custom_raw_response_wrapper( + audio.generate_speech, + AsyncBinaryAPIResponse, + ) + self.transcribe_audio = async_to_raw_response_wrapper( + audio.transcribe_audio, + ) + self.translate_audio = async_to_raw_response_wrapper( + audio.translate_audio, + ) + + +class AudioResourceWithStreamingResponse: + def __init__(self, audio: AudioResource) -> None: + self._audio = audio + + self.generate_speech = to_custom_streamed_response_wrapper( + audio.generate_speech, + StreamedBinaryAPIResponse, + ) + self.transcribe_audio = to_streamed_response_wrapper( + audio.transcribe_audio, + ) + self.translate_audio = to_streamed_response_wrapper( + audio.translate_audio, + ) + + +class AsyncAudioResourceWithStreamingResponse: + def __init__(self, audio: AsyncAudioResource) -> None: + self._audio = audio + + self.generate_speech = async_to_custom_streamed_response_wrapper( + audio.generate_speech, + AsyncStreamedBinaryAPIResponse, + ) + self.transcribe_audio = async_to_streamed_response_wrapper( + audio.transcribe_audio, + ) + self.translate_audio = async_to_streamed_response_wrapper( + audio.translate_audio, + ) diff --git a/src/digitalocean_genai_sdk/resources/batches.py b/src/digitalocean_genai_sdk/resources/batches.py new file mode 100644 index 00000000..a2b1fedf --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/batches.py @@ -0,0 +1,513 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
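One usage note on the audio file just closed: `transcribe_audio` takes a file object, not a path string, and the resource assembles the multipart form (including the `boundary` parameter discussed in its comments) on its own. A sketch:

with open("meeting.mp3", "rb") as audio_file:  # illustrative local file
    transcript = client.audio.transcribe_audio(
        file=audio_file,
        model="whisper-1",
        response_format="json",
    )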
+ +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal + +import httpx + +from ..types import batch_list_params, batch_create_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..types.batch import Batch +from .._base_client import make_request_options +from ..types.batch_list_response import BatchListResponse + +__all__ = ["BatchesResource", "AsyncBatchesResource"] + + +class BatchesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> BatchesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return BatchesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> BatchesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return BatchesResourceWithStreamingResponse(self) + + def create( + self, + *, + completion_window: Literal["24h"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + input_file_id: str, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Creates and executes a batch from an uploaded file of requests + + Args: + completion_window: The time frame within which the batch should be processed. Currently only `24h` + is supported. + + endpoint: The endpoint to be used for all requests in the batch. Currently + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. + + input_file_id: The ID of an uploaded file that contains requests for the new batch. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your input file must be formatted as a + [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with + the purpose `batch`. The file can contain up to 50,000 requests, and can be up + to 200 MB in size. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/batches", + body=maybe_transform( + { + "completion_window": completion_window, + "endpoint": endpoint, + "input_file_id": input_file_id, + "metadata": metadata, + }, + batch_create_params.BatchCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + def retrieve( + self, + batch_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Retrieves a batch. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._get( + f"/batches/{batch_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BatchListResponse: + """List your organization's batches. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/batches", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + batch_list_params.BatchListParams, + ), + ), + cast_to=BatchListResponse, + ) + + def cancel( + self, + batch_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """Cancels an in-progress batch. + + The batch will be in status `cancelling` for up to + 10 minutes, before changing to `cancelled`, where it will have partial results + (if any) available in the output file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._post( + f"/batches/{batch_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + +class AsyncBatchesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncBatchesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncBatchesResourceWithStreamingResponse(self) + + async def create( + self, + *, + completion_window: Literal["24h"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + input_file_id: str, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Creates and executes a batch from an uploaded file of requests + + Args: + completion_window: The time frame within which the batch should be processed. Currently only `24h` + is supported. + + endpoint: The endpoint to be used for all requests in the batch. Currently + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. + + input_file_id: The ID of an uploaded file that contains requests for the new batch. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your input file must be formatted as a + [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with + the purpose `batch`. The file can contain up to 50,000 requests, and can be up + to 200 MB in size. + + metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/batches", + body=await async_maybe_transform( + { + "completion_window": completion_window, + "endpoint": endpoint, + "input_file_id": input_file_id, + "metadata": metadata, + }, + batch_create_params.BatchCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + async def retrieve( + self, + batch_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """ + Retrieves a batch. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return await self._get( + f"/batches/{batch_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BatchListResponse: + """List your organization's batches. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/batches", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + batch_list_params.BatchListParams, + ), + ), + cast_to=BatchListResponse, + ) + + async def cancel( + self, + batch_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Batch: + """Cancels an in-progress batch. + + The batch will be in status `cancelling` for up to + 10 minutes, before changing to `cancelled`, where it will have partial results + (if any) available in the output file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return await self._post( + f"/batches/{batch_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Batch, + ) + + +class BatchesResourceWithRawResponse: + def __init__(self, batches: BatchesResource) -> None: + self._batches = batches + + self.create = to_raw_response_wrapper( + batches.create, + ) + self.retrieve = to_raw_response_wrapper( + batches.retrieve, + ) + self.list = to_raw_response_wrapper( + batches.list, + ) + self.cancel = to_raw_response_wrapper( + batches.cancel, + ) + + +class AsyncBatchesResourceWithRawResponse: + def __init__(self, batches: AsyncBatchesResource) -> None: + self._batches = batches + + self.create = async_to_raw_response_wrapper( + batches.create, + ) + self.retrieve = async_to_raw_response_wrapper( + batches.retrieve, + ) + self.list = async_to_raw_response_wrapper( + batches.list, + ) + self.cancel = async_to_raw_response_wrapper( + batches.cancel, + ) + + +class BatchesResourceWithStreamingResponse: + def __init__(self, batches: BatchesResource) -> None: + self._batches = batches + + self.create = to_streamed_response_wrapper( + batches.create, + ) + self.retrieve = to_streamed_response_wrapper( + batches.retrieve, + ) + self.list = to_streamed_response_wrapper( + batches.list, + ) + self.cancel = to_streamed_response_wrapper( + batches.cancel, + ) + + +class AsyncBatchesResourceWithStreamingResponse: + def __init__(self, batches: AsyncBatchesResource) -> None: + self._batches = batches + + self.create = async_to_streamed_response_wrapper( + batches.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + batches.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + batches.list, + ) + self.cancel = async_to_streamed_response_wrapper( + batches.cancel, + ) diff --git 
a/src/digitalocean_genai_sdk/resources/chat/__init__.py b/src/digitalocean_genai_sdk/resources/chat/__init__.py new file mode 100644 index 00000000..ec960eb4 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/chat/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = [ + "CompletionsResource", + "AsyncCompletionsResource", + "CompletionsResourceWithRawResponse", + "AsyncCompletionsResourceWithRawResponse", + "CompletionsResourceWithStreamingResponse", + "AsyncCompletionsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/chat/chat.py b/src/digitalocean_genai_sdk/resources/chat/chat.py new file mode 100644 index 00000000..df1f356c --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/chat/chat.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = ["ChatResource", "AsyncChatResource"] + + +class ChatResource(SyncAPIResource): + @cached_property + def completions(self) -> CompletionsResource: + return CompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ChatResourceWithStreamingResponse(self) + + +class AsyncChatResource(AsyncAPIResource): + @cached_property + def completions(self) -> AsyncCompletionsResource: + return AsyncCompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
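The chat resource is a pure namespace: `completions` hangs off it as a cached sub-resource, so call sites chain through it. A sketch (the `create` parameters follow the usual chat-completions shape and are assumptions here; the method itself is defined in completions.py below):

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model ID
    messages=[{"role": "user", "content": "Hello!"}],  # assumed parameter shape
)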
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncChatResourceWithStreamingResponse(self) + + +class ChatResourceWithRawResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithRawResponse: + return CompletionsResourceWithRawResponse(self._chat.completions) + + +class AsyncChatResourceWithRawResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithRawResponse: + return AsyncCompletionsResourceWithRawResponse(self._chat.completions) + + +class ChatResourceWithStreamingResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithStreamingResponse: + return CompletionsResourceWithStreamingResponse(self._chat.completions) + + +class AsyncChatResourceWithStreamingResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithStreamingResponse: + return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions) diff --git a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat/completions.py new file mode 100644 index 00000000..c0908a57 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/chat/completions.py @@ -0,0 +1,1233 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
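+
+# Typical usage (a sketch; the client class name `DigitaloceanGenaiSDK` is
+# assumed from the package name and may differ in the generated client):
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()
+#     completion = client.chat.completions.create(
+#         model="gpt-4o",
+#         messages=[{"role": "user", "content": "Say hello"}],
+#     )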
+ +from __future__ import annotations + +from typing import Dict, List, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ...types import ReasoningEffort +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.chat import ( + completion_list_params, + completion_create_params, + completion_update_params, + completion_list_messages_params, +) +from ..._base_client import make_request_options +from ...types.reasoning_effort import ReasoningEffort +from ...types.chat.create_response import CreateResponse +from ...types.stop_configuration_param import StopConfigurationParam +from ...types.chat.model_ids_shared_param import ModelIDsSharedParam +from ...types.chat.completion_list_response import CompletionListResponse +from ...types.chat.completion_delete_response import CompletionDeleteResponse +from ...types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from ...types.chat.completion_list_messages_response import CompletionListMessagesResponse + +__all__ = ["CompletionsResource", "AsyncCompletionsResource"] + + +class CompletionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return CompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
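+
+        For example (a sketch; the client, the arguments, and the `iter_lines()`
+        helper are illustrative):
+
+            with client.chat.completions.with_streaming_response.create(
+                model="gpt-4o",
+                messages=[{"role": "user", "content": "Hello"}],
+            ) as response:
+                for line in response.iter_lines():
+                    print(line)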
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return CompletionsResourceWithStreamingResponse(self) + + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: ModelIDsSharedParam, + audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, + stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """ + **Starting a new project?** We recommend trying + [Responses](/docs/api-reference/responses) to take advantage of the latest + OpenAI platform features. Compare + [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- + + Creates a model response for the given chat conversation. Learn more in the + [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), + and [audio](/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](/docs/guides/reasoning). + + Args: + messages: A list of messages comprising the conversation so far. 
Depending on the + [model](/docs/models) you use, different message types (modalities) are + supported, like [text](/docs/guides/text-generation), + [images](/docs/guides/vision), and [audio](/docs/guides/audio). + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the [model guide](/docs/models) to browse and compare + available models. + + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a + function. + + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with [o1 series models](/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](/docs/guides/audio). To request that this model generate both + text and audio responses, you can use: + + `["text", "audio"]` + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarantee. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](/docs/guides/distillation) or + [evals](/docs/guides/evals) products. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the [Streaming section below](/docs/api-reference/chat/streaming) for more + information, along with the + [streaming responses](/docs/guides/streaming-responses) guide for more + information on how to handle the streaming events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. A max of 128 functions are supported. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + web_search_options: This tool searches the web for relevant results to use in a response. Learn more + about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "seed": seed, + "service_tier": service_tier, + "stop": stop, + "store": store, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + "web_search_options": web_search_options, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + def retrieve( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """Get a stored chat completion. + + Only Chat Completions that have been created with + the `store` parameter set to `true` will be returned. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._get( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + def update( + self, + completion_id: str, + *, + metadata: Optional[Dict[str, str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """Modify a stored chat completion. + + Only Chat Completions that have been created + with the `store` parameter set to `true` can be modified. Currently, the only + supported modification is to update the `metadata` field. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._post( + f"/chat/completions/{completion_id}", + body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
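+        # For example, `extra_query={"debug": "true"}` (an illustrative key) would be
+        # merged into this request's query string only.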
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionListResponse: + """List stored Chat Completions. + + Only Chat Completions that have been stored with + the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last chat completion from the previous pagination request. + + limit: Number of Chat Completions to retrieve. + + metadata: + A list of metadata keys to filter the Chat Completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + + model: The model used to generate the Chat Completions. + + order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + `desc` for descending order. Defaults to `asc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/chat/completions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + "model": model, + "order": order, + }, + completion_list_params.CompletionListParams, + ), + ), + cast_to=CompletionListResponse, + ) + + def delete( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionDeleteResponse: + """Delete a stored chat completion. + + Only Chat Completions that have been created + with the `store` parameter set to `true` can be deleted. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._delete( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionDeleteResponse, + ) + + def list_messages( + self, + completion_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionListMessagesResponse: + """Get the messages in a stored chat completion. + + Only Chat Completions that have + been created with the `store` parameter set to `true` will be returned. 
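+
+        For example (a sketch; the completion ID is illustrative):
+
+            messages = client.chat.completions.list_messages("chatcmpl-abc123", limit=2)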
+ + Args: + after: Identifier for the last message from the previous pagination request. + + limit: Number of messages to retrieve. + + order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + for descending order. Defaults to `asc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return self._get( + f"/chat/completions/{completion_id}/messages", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + completion_list_messages_params.CompletionListMessagesParams, + ), + ), + cast_to=CompletionListMessagesResponse, + ) + + +class AsyncCompletionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncCompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncCompletionsResourceWithStreamingResponse(self) + + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: ModelIDsSharedParam, + audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, + stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: 
completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """ + **Starting a new project?** We recommend trying + [Responses](/docs/api-reference/responses) to take advantage of the latest + OpenAI platform features. Compare + [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). + + --- + + Creates a model response for the given chat conversation. Learn more in the + [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), + and [audio](/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](/docs/guides/reasoning). + + Args: + messages: A list of messages comprising the conversation so far. Depending on the + [model](/docs/models) you use, different message types (modalities) are + supported, like [text](/docs/guides/text-generation), + [images](/docs/guides/vision), and [audio](/docs/guides/audio). + + model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the [model guide](/docs/models) to browse and compare + available models. + + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + function_call: Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a + function. + + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + + functions: Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. 
The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with [o1 series models](/docs/guides/reasoning). + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + modalities: Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](/docs/guides/audio). To request that this model generate both + text and audio responses, you can use: + + `["text", "audio"]` + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + + seed: This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. 
+ + service_tier: Specifies the latency tier to use for processing the request. This parameter is + relevant for customers subscribed to the scale tier service: + + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarantee. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` + utilized. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](/docs/guides/distillation) or + [evals](/docs/guides/evals) products. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the [Streaming section below](/docs/api-reference/chat/streaming) for more + information, along with the + [streaming responses](/docs/guides/streaming-responses) guide for more + information on how to handle the streaming events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. Use this to provide a list of functions the model may generate JSON inputs + for. A max of 128 functions are supported. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + web_search_options: This tool searches the web for relevant results to use in a response.
Learn more + about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "seed": seed, + "service_tier": service_tier, + "stop": stop, + "store": store, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + "web_search_options": web_search_options, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + async def retrieve( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """Get a stored chat completion. + + Only Chat Completions that have been created with + the `store` parameter set to `true` will be returned. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._get( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + async def update( + self, + completion_id: str, + *, + metadata: Optional[Dict[str, str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CreateResponse: + """Modify a stored chat completion. + + Only Chat Completions that have been created + with the `store` parameter set to `true` can be modified. Currently, the only + supported modification is to update the `metadata` field. 
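+
+        For example (a sketch; the completion ID and metadata values are
+        illustrative):
+
+            updated = await client.chat.completions.update(
+                "chatcmpl-abc123",
+                metadata={"topic": "demo"},
+            )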
+ + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._post( + f"/chat/completions/{completion_id}", + body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CreateResponse, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: str | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionListResponse: + """List stored Chat Completions. + + Only Chat Completions that have been stored with + the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last chat completion from the previous pagination request. + + limit: Number of Chat Completions to retrieve. + + metadata: + A list of metadata keys to filter the Chat Completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + + model: The model used to generate the Chat Completions. + + order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + `desc` for descending order. Defaults to `asc`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/chat/completions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + "model": model, + "order": order, + }, + completion_list_params.CompletionListParams, + ), + ), + cast_to=CompletionListResponse, + ) + + async def delete( + self, + completion_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionDeleteResponse: + """Delete a stored chat completion. + + Only Chat Completions that have been created + with the `store` parameter set to `true` can be deleted. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._delete( + f"/chat/completions/{completion_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionDeleteResponse, + ) + + async def list_messages( + self, + completion_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionListMessagesResponse: + """Get the messages in a stored chat completion. + + Only Chat Completions that have + been created with the `store` parameter set to `true` will be returned. + + Args: + after: Identifier for the last message from the previous pagination request. + + limit: Number of messages to retrieve. + + order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + for descending order. Defaults to `asc`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not completion_id: + raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") + return await self._get( + f"/chat/completions/{completion_id}/messages", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + completion_list_messages_params.CompletionListMessagesParams, + ), + ), + cast_to=CompletionListMessagesResponse, + ) + + +class CompletionsResourceWithRawResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_raw_response_wrapper( + completions.create, + ) + self.retrieve = to_raw_response_wrapper( + completions.retrieve, + ) + self.update = to_raw_response_wrapper( + completions.update, + ) + self.list = to_raw_response_wrapper( + completions.list, + ) + self.delete = to_raw_response_wrapper( + completions.delete, + ) + self.list_messages = to_raw_response_wrapper( + completions.list_messages, + ) + + +class AsyncCompletionsResourceWithRawResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_raw_response_wrapper( + completions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + completions.retrieve, + ) + self.update = async_to_raw_response_wrapper( + completions.update, + ) + self.list = async_to_raw_response_wrapper( + completions.list, + ) + self.delete = async_to_raw_response_wrapper( + completions.delete, + ) + self.list_messages = async_to_raw_response_wrapper( + completions.list_messages, + ) + + +class CompletionsResourceWithStreamingResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_streamed_response_wrapper( + completions.create, + ) + self.retrieve = to_streamed_response_wrapper( + completions.retrieve, + ) + self.update = to_streamed_response_wrapper( + completions.update, + ) + self.list = to_streamed_response_wrapper( + completions.list, + ) + self.delete = to_streamed_response_wrapper( + completions.delete, + ) + self.list_messages = to_streamed_response_wrapper( + completions.list_messages, + ) + + +class AsyncCompletionsResourceWithStreamingResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_streamed_response_wrapper( + completions.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + completions.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + completions.update, + ) + self.list = async_to_streamed_response_wrapper( + completions.list, + ) + self.delete = async_to_streamed_response_wrapper( + completions.delete, + ) + self.list_messages = async_to_streamed_response_wrapper( + completions.list_messages, + ) diff --git a/src/digitalocean_genai_sdk/resources/completions.py b/src/digitalocean_genai_sdk/resources/completions.py new file mode 100644 index 00000000..ff495166 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/completions.py @@ -0,0 +1,460 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ..types import completion_create_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.stop_configuration_param import StopConfigurationParam +from ..types.completion_create_response import CompletionCreateResponse +from ..types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam + +__all__ = ["CompletionsResource", "AsyncCompletionsResource"] + + +class CompletionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return CompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return CompletionsResourceWithStreamingResponse(self) + + def create( + self, + *, + model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], + prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + best_of: Optional[int] | NotGiven = NOT_GIVEN, + echo: Optional[bool] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + suffix: Optional[str] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a completion for the provided prompt and parameters. + + Args: + model: ID of the model to use. 
You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + prompt: The prompt(s) to generate completions for, encoded as a string, array of + strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during + training, so if a prompt is not specified the model will generate as if from the + beginning of a new document. + + best_of: Generates `best_of` completions server-side and returns the "best" (the one with + the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and + `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly + consume your token quota. Use carefully and ensure that you have reasonable + settings for `max_tokens` and `stop`. + + echo: Echo back the prompt in addition to the completion + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation) + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT + tokenizer) to an associated bias value from -100 to 100. You can use this + [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + Mathematically, the bias is added to the logits generated by the model prior to + sampling. The exact effect will vary per model, but values between -1 and 1 + should decrease or increase likelihood of selection; values like -100 or 100 + should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + from being generated. + + logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as + well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + list of the 5 most likely tokens. The API will always return the `logprob` of + the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the + completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + + n: How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly + consume your token quota. Use carefully and ensure that you have reasonable + settings for `max_tokens` and `stop`. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics.
+
+              [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          stream_options: Options for the streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/completions",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "prompt": prompt,
+                    "best_of": best_of,
+                    "echo": echo,
+                    "frequency_penalty": frequency_penalty,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_tokens": max_tokens,
+                    "n": n,
+                    "presence_penalty": presence_penalty,
+                    "seed": seed,
+                    "stop": stop,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "suffix": suffix,
+                    "temperature": temperature,
+                    "top_p": top_p,
+                    "user": user,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=CompletionCreateResponse,
+        )
+
+
+class AsyncCompletionsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return AsyncCompletionsResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
+        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
+        best_of: Optional[int] | NotGiven = NOT_GIVEN,
+        echo: Optional[bool] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN,
+        stream: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        suffix: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> CompletionCreateResponse:
+        """
+        Creates a completion for the provided prompt and parameters.
+
+        Args:
+          model: ID of the model to use. You can use the
+              [List models](/docs/api-reference/models/list) API to see all of your available
+              models, or see our [Model overview](/docs/models) for descriptions of them.
+
+          prompt: The prompt(s) to generate completions for, encoded as a string, array of
+              strings, array of tokens, or array of token arrays.
+
+              Note that <|endoftext|> is the document separator that the model sees during
+              training, so if a prompt is not specified the model will generate as if from the
+              beginning of a new document.
+
+          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
+              the highest log probability per token). Results cannot be streamed.
+
+              When used with `n`, `best_of` controls the number of candidate completions and
+              `n` specifies how many to return – `best_of` must be greater than `n`.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          echo: Echo back the prompt in addition to the completion.
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+              [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+              tokenizer) to an associated bias value from -100 to 100. You can use this
+              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+              Mathematically, the bias is added to the logits generated by the model prior to
+              sampling. The exact effect will vary per model, but values between -1 and 1
+              should decrease or increase likelihood of selection; values like -100 or 100
+              should result in a ban or exclusive selection of the relevant token.
+
+              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+              from being generated.
+
+          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
+              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
+              list of the 5 most likely tokens. The API will always return the `logprob` of
+              the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+              The maximum value for `logprobs` is 5.
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
+              completion.
+
+              The token count of your prompt plus `max_tokens` cannot exceed the model's
+              context length.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+              for counting tokens.
+
+          n: How many completions to generate for each prompt.
+
+              **Note:** Because this parameter generates many completions, it can quickly
+              consume your token quota. Use carefully and ensure that you have reasonable
+              settings for `max_tokens` and `stop`.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+              [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+
+          seed: If specified, our system will make a best effort to sample deterministically,
+              such that repeated requests with the same `seed` and parameters should return
+              the same result.
+
+              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+              response parameter to monitor changes in the backend.
+
+          stop: Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          stream: Whether to stream back partial progress. If set, tokens will be sent as
+              data-only
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+              as they become available, with the stream terminated by a `data: [DONE]`
+              message.
+              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+
+          stream_options: Options for the streaming response. Only set this when you set `stream: true`.
+
+          suffix: The suffix that comes after a completion of inserted text.
+
+              This parameter is only supported for `gpt-3.5-turbo-instruct`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+              We generally recommend altering this or `top_p` but not both.
+ + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/completions", + body=await async_maybe_transform( + { + "model": model, + "prompt": prompt, + "best_of": best_of, + "echo": echo, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "presence_penalty": presence_penalty, + "seed": seed, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "suffix": suffix, + "temperature": temperature, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class CompletionsResourceWithRawResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_raw_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithRawResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_raw_response_wrapper( + completions.create, + ) + + +class CompletionsResourceWithStreamingResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_streamed_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithStreamingResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_streamed_response_wrapper( + completions.create, + ) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py new file mode 100644 index 00000000..92552f62 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/embeddings.py @@ -0,0 +1,236 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
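[Editor's note: the completions resource above follows the request pattern used by every resource in this patch — keyword arguments are transformed into a request body and posted to `/completions`. A minimal usage sketch; the client class name and constructor argument are assumptions based on the package name, not confirmed by this patch (see _client.py for the real entry point).]

# Hypothetical usage sketch for the completions resource defined above.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client name

client = DigitaloceanGenaiSDK(api_key="...")  # hypothetical constructor argument

completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say this is a test",
    max_tokens=16,
    temperature=0.2,  # a lower temperature keeps the output more deterministic
)
print(completion)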
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal + +import httpx + +from ..types import embedding_create_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.embedding_create_response import EmbeddingCreateResponse + +__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"] + + +class EmbeddingsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return EmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return EmbeddingsResourceWithStreamingResponse(self) + + def create( + self, + *, + input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], + model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + dimensions: int | NotGiven = NOT_GIVEN, + encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EmbeddingCreateResponse: + """ + Creates an embedding vector representing the input text. + + Args: + input: Input text to embed, encoded as a string or array of tokens. To embed multiple + inputs in a single request, pass an array of strings or array of token arrays. + The input must not exceed the max input tokens for the model (8192 tokens for + `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + dimensions or less. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. + + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + dimensions: The number of dimensions the resulting output embeddings should have. Only + supported in `text-embedding-3` and later models. + + encoding_format: The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). 
+ + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/embeddings", + body=maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + "user": user, + }, + embedding_create_params.EmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EmbeddingCreateResponse, + ) + + +class AsyncEmbeddingsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncEmbeddingsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncEmbeddingsResourceWithStreamingResponse(self) + + async def create( + self, + *, + input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], + model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + dimensions: int | NotGiven = NOT_GIVEN, + encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EmbeddingCreateResponse: + """ + Creates an embedding vector representing the input text. + + Args: + input: Input text to embed, encoded as a string or array of tokens. To embed multiple + inputs in a single request, pass an array of strings or array of token arrays. + The input must not exceed the max input tokens for the model (8192 tokens for + `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + dimensions or less. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. + + model: ID of the model to use. You can use the + [List models](/docs/api-reference/models/list) API to see all of your available + models, or see our [Model overview](/docs/models) for descriptions of them. + + dimensions: The number of dimensions the resulting output embeddings should have. Only + supported in `text-embedding-3` and later models. + + encoding_format: The format to return the embeddings in. 
Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/embeddings", + body=await async_maybe_transform( + { + "input": input, + "model": model, + "dimensions": dimensions, + "encoding_format": encoding_format, + "user": user, + }, + embedding_create_params.EmbeddingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EmbeddingCreateResponse, + ) + + +class EmbeddingsResourceWithRawResponse: + def __init__(self, embeddings: EmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = to_raw_response_wrapper( + embeddings.create, + ) + + +class AsyncEmbeddingsResourceWithRawResponse: + def __init__(self, embeddings: AsyncEmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = async_to_raw_response_wrapper( + embeddings.create, + ) + + +class EmbeddingsResourceWithStreamingResponse: + def __init__(self, embeddings: EmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = to_streamed_response_wrapper( + embeddings.create, + ) + + +class AsyncEmbeddingsResourceWithStreamingResponse: + def __init__(self, embeddings: AsyncEmbeddingsResource) -> None: + self._embeddings = embeddings + + self.create = async_to_streamed_response_wrapper( + embeddings.create, + ) diff --git a/src/digitalocean_genai_sdk/resources/files.py b/src/digitalocean_genai_sdk/resources/files.py new file mode 100644 index 00000000..65e459f4 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/files.py @@ -0,0 +1,608 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Mapping, cast +from typing_extensions import Literal + +import httpx + +from ..types import file_list_params, file_upload_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.openai_file import OpenAIFile +from ..types.file_list_response import FileListResponse +from ..types.file_delete_response import FileDeleteResponse + +__all__ = ["FilesResource", "AsyncFilesResource"] + + +class FilesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return FilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return FilesResourceWithStreamingResponse(self) + + def retrieve( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OpenAIFile: + """ + Returns information about a specific file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OpenAIFile, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + purpose: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileListResponse: + """Returns a list of files. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 10,000, and the default is 10,000. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + purpose: Only return files with the given purpose. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), + ), + cast_to=FileListResponse, + ) + + def delete( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete a file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._delete( + f"/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + def retrieve_content( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> str: + """ + Returns the contents of the specified file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=str, + ) + + def upload( + self, + *, + file: FileTypes, + purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OpenAIFile: + """Upload a file that can be used across various endpoints. + + Individual files can be + up to 512 MB, and the size of all files uploaded by one organization can be up + to 100 GB. 
+ + The Assistants API supports files up to 2 million tokens and of specific file + types. See the [Assistants Tools guide](/docs/assistants/tools) for details. + + The Fine-tuning API only supports `.jsonl` files. The input also has certain + required formats for fine-tuning + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) models. + + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + has a specific required [format](/docs/api-reference/batch/request-input). + + Please [contact us](https://help.openai.com/) if you need to increase these + storage limits. + + Args: + file: The File object (not file name) to be uploaded. + + purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the + Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + Flexible file type for any purpose - `evals`: Used for eval data sets + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "purpose": purpose, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/files", + body=maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OpenAIFile, + ) + + +class AsyncFilesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncFilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncFilesResourceWithStreamingResponse(self) + + async def retrieve( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OpenAIFile: + """ + Returns information about a specific file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OpenAIFile, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + purpose: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileListResponse: + """Returns a list of files. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 10,000, and the default is 10,000. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + purpose: Only return files with the given purpose. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), + ), + cast_to=FileListResponse, + ) + + async def delete( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """ + Delete a file. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._delete( + f"/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + async def retrieve_content( + self, + file_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> str: + """ + Returns the contents of the specified file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=str, + ) + + async def upload( + self, + *, + file: FileTypes, + purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OpenAIFile: + """Upload a file that can be used across various endpoints. + + Individual files can be + up to 512 MB, and the size of all files uploaded by one organization can be up + to 100 GB. + + The Assistants API supports files up to 2 million tokens and of specific file + types. See the [Assistants Tools guide](/docs/assistants/tools) for details. + + The Fine-tuning API only supports `.jsonl` files. The input also has certain + required formats for fine-tuning + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) models. + + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + has a specific required [format](/docs/api-reference/batch/request-input). + + Please [contact us](https://help.openai.com/) if you need to increase these + storage limits. + + Args: + file: The File object (not file name) to be uploaded. + + purpose: The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + Flexible file type for any purpose - `evals`: Used for eval data sets + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "file": file, + "purpose": purpose, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/files", + body=await async_maybe_transform(body, file_upload_params.FileUploadParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OpenAIFile, + ) + + +class FilesResourceWithRawResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.retrieve = to_raw_response_wrapper( + files.retrieve, + ) + self.list = to_raw_response_wrapper( + files.list, + ) + self.delete = to_raw_response_wrapper( + files.delete, + ) + self.retrieve_content = to_raw_response_wrapper( + files.retrieve_content, + ) + self.upload = to_raw_response_wrapper( + files.upload, + ) + + +class AsyncFilesResourceWithRawResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.retrieve = async_to_raw_response_wrapper( + files.retrieve, + ) + self.list = async_to_raw_response_wrapper( + files.list, + ) + self.delete = async_to_raw_response_wrapper( + files.delete, + ) + self.retrieve_content = async_to_raw_response_wrapper( + files.retrieve_content, + ) + self.upload = async_to_raw_response_wrapper( + files.upload, + ) + + +class FilesResourceWithStreamingResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.retrieve = to_streamed_response_wrapper( + files.retrieve, + ) + self.list = to_streamed_response_wrapper( + files.list, + ) + self.delete = to_streamed_response_wrapper( + files.delete, + ) + self.retrieve_content = to_streamed_response_wrapper( + files.retrieve_content, + ) + self.upload = to_streamed_response_wrapper( + files.upload, + ) + + +class AsyncFilesResourceWithStreamingResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.retrieve = async_to_streamed_response_wrapper( + files.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + files.list, + ) + self.delete = async_to_streamed_response_wrapper( + files.delete, + ) + self.retrieve_content = async_to_streamed_response_wrapper( + files.retrieve_content, + ) + self.upload = async_to_streamed_response_wrapper( + files.upload, + ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py new file mode 100644 index 00000000..5f198d2e --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .jobs import ( + JobsResource, + AsyncJobsResource, + JobsResourceWithRawResponse, + AsyncJobsResourceWithRawResponse, + JobsResourceWithStreamingResponse, + AsyncJobsResourceWithStreamingResponse, +) +from .checkpoints import ( + CheckpointsResource, + AsyncCheckpointsResource, + CheckpointsResourceWithRawResponse, + AsyncCheckpointsResourceWithRawResponse, + CheckpointsResourceWithStreamingResponse, + AsyncCheckpointsResourceWithStreamingResponse, +) +from .fine_tuning import ( + FineTuningResource, + AsyncFineTuningResource, + FineTuningResourceWithRawResponse, + AsyncFineTuningResourceWithRawResponse, + FineTuningResourceWithStreamingResponse, + AsyncFineTuningResourceWithStreamingResponse, +) + +__all__ = [ + "CheckpointsResource", + "AsyncCheckpointsResource", + "CheckpointsResourceWithRawResponse", + "AsyncCheckpointsResourceWithRawResponse", + "CheckpointsResourceWithStreamingResponse", + "AsyncCheckpointsResourceWithStreamingResponse", + "JobsResource", + "AsyncJobsResource", + "JobsResourceWithRawResponse", + "AsyncJobsResourceWithRawResponse", + "JobsResourceWithStreamingResponse", + "AsyncJobsResourceWithStreamingResponse", + "FineTuningResource", + "AsyncFineTuningResource", + "FineTuningResourceWithRawResponse", + "AsyncFineTuningResourceWithRawResponse", + "FineTuningResourceWithStreamingResponse", + "AsyncFineTuningResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py new file mode 100644 index 00000000..3f6710f0 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .checkpoints import ( + CheckpointsResource, + AsyncCheckpointsResource, + CheckpointsResourceWithRawResponse, + AsyncCheckpointsResourceWithRawResponse, + CheckpointsResourceWithStreamingResponse, + AsyncCheckpointsResourceWithStreamingResponse, +) +from .permissions import ( + PermissionsResource, + AsyncPermissionsResource, + PermissionsResourceWithRawResponse, + AsyncPermissionsResourceWithRawResponse, + PermissionsResourceWithStreamingResponse, + AsyncPermissionsResourceWithStreamingResponse, +) + +__all__ = [ + "PermissionsResource", + "AsyncPermissionsResource", + "PermissionsResourceWithRawResponse", + "AsyncPermissionsResourceWithRawResponse", + "PermissionsResourceWithStreamingResponse", + "AsyncPermissionsResourceWithStreamingResponse", + "CheckpointsResource", + "AsyncCheckpointsResource", + "CheckpointsResourceWithRawResponse", + "AsyncCheckpointsResourceWithRawResponse", + "CheckpointsResourceWithStreamingResponse", + "AsyncCheckpointsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py new file mode 100644 index 00000000..b1a85058 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
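[Editor's note: the two __init__.py modules above only re-export resource classes; callers reach them through attribute access on the client, which the checkpoints.py module below wires up via `cached_property`. A hypothetical sketch of that nesting, reusing the assumed `client` from earlier.]

# Hypothetical navigation of the nested fine-tuning resources.
permissions = client.fine_tuning.checkpoints.permissions  # assumed attribute path
page = permissions.retrieve(
    "ftckpt_abc123",  # hypothetical checkpoint permission identifier
    limit=10,
    order="descending",
)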
+ +from __future__ import annotations + +from ...._compat import cached_property +from .permissions import ( + PermissionsResource, + AsyncPermissionsResource, + PermissionsResourceWithRawResponse, + AsyncPermissionsResourceWithRawResponse, + PermissionsResourceWithStreamingResponse, + AsyncPermissionsResourceWithStreamingResponse, +) +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"] + + +class CheckpointsResource(SyncAPIResource): + @cached_property + def permissions(self) -> PermissionsResource: + return PermissionsResource(self._client) + + @cached_property + def with_raw_response(self) -> CheckpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return CheckpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return CheckpointsResourceWithStreamingResponse(self) + + +class AsyncCheckpointsResource(AsyncAPIResource): + @cached_property + def permissions(self) -> AsyncPermissionsResource: + return AsyncPermissionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncCheckpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncCheckpointsResourceWithStreamingResponse(self) + + +class CheckpointsResourceWithRawResponse: + def __init__(self, checkpoints: CheckpointsResource) -> None: + self._checkpoints = checkpoints + + @cached_property + def permissions(self) -> PermissionsResourceWithRawResponse: + return PermissionsResourceWithRawResponse(self._checkpoints.permissions) + + +class AsyncCheckpointsResourceWithRawResponse: + def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: + self._checkpoints = checkpoints + + @cached_property + def permissions(self) -> AsyncPermissionsResourceWithRawResponse: + return AsyncPermissionsResourceWithRawResponse(self._checkpoints.permissions) + + +class CheckpointsResourceWithStreamingResponse: + def __init__(self, checkpoints: CheckpointsResource) -> None: + self._checkpoints = checkpoints + + @cached_property + def permissions(self) -> PermissionsResourceWithStreamingResponse: + return PermissionsResourceWithStreamingResponse(self._checkpoints.permissions) + + +class AsyncCheckpointsResourceWithStreamingResponse: + def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: + self._checkpoints = checkpoints + + @cached_property + def permissions(self) -> AsyncPermissionsResourceWithStreamingResponse: + return AsyncPermissionsResourceWithStreamingResponse(self._checkpoints.permissions) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py new file mode 100644 index 00000000..0dee4435 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py @@ -0,0 +1,401 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params +from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse +from ....types.fine_tuning.checkpoints.list_fine_tuning_checkpoint_permission import ListFineTuningCheckpointPermission + +__all__ = ["PermissionsResource", "AsyncPermissionsResource"] + + +class PermissionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> PermissionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return PermissionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> PermissionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return PermissionsResourceWithStreamingResponse(self) + + def create( + self, + permission_id: str, + *, + project_ids: List[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListFineTuningCheckpointPermission: + """ + **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + + This enables organization owners to share fine-tuned models with other projects + in their organization. + + Args: + project_ids: The project identifiers to grant access to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return self._post( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ListFineTuningCheckpointPermission, + ) + + def retrieve( + self, + permission_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListFineTuningCheckpointPermission: + """ + **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + + Organization owners can use this endpoint to view all permissions for a + fine-tuned model checkpoint. + + Args: + after: Identifier for the last permission ID from the previous pagination request. + + limit: Number of permissions to retrieve. + + order: The order in which to retrieve permissions. + + project_id: The ID of the project to get permissions for. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return self._get( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "project_id": project_id, + }, + permission_retrieve_params.PermissionRetrieveParams, + ), + ), + cast_to=ListFineTuningCheckpointPermission, + ) + + def delete( + self, + permission_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> PermissionDeleteResponse: + """ + **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + + Organization owners can use this endpoint to delete a permission for a + fine-tuned model checkpoint. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return self._delete( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=PermissionDeleteResponse, + ) + + +class AsyncPermissionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncPermissionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncPermissionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncPermissionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncPermissionsResourceWithStreamingResponse(self) + + async def create( + self, + permission_id: str, + *, + project_ids: List[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListFineTuningCheckpointPermission: + """ + **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + + This enables organization owners to share fine-tuned models with other projects + in their organization. + + Args: + project_ids: The project identifiers to grant access to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return await self._post( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + body=await async_maybe_transform( + {"project_ids": project_ids}, permission_create_params.PermissionCreateParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ListFineTuningCheckpointPermission, + ) + + async def retrieve( + self, + permission_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListFineTuningCheckpointPermission: + """ + **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + + Organization owners can use this endpoint to view all permissions for a + fine-tuned model checkpoint. + + Args: + after: Identifier for the last permission ID from the previous pagination request. + + limit: Number of permissions to retrieve. + + order: The order in which to retrieve permissions. + + project_id: The ID of the project to get permissions for. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return await self._get( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "project_id": project_id, + }, + permission_retrieve_params.PermissionRetrieveParams, + ), + ), + cast_to=ListFineTuningCheckpointPermission, + ) + + async def delete( + self, + permission_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> PermissionDeleteResponse: + """ + **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + + Organization owners can use this endpoint to delete a permission for a + fine-tuned model checkpoint. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not permission_id: + raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") + return await self._delete( + f"/fine_tuning/checkpoints/{permission_id}/permissions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=PermissionDeleteResponse, + ) + + +class PermissionsResourceWithRawResponse: + def __init__(self, permissions: PermissionsResource) -> None: + self._permissions = permissions + + self.create = to_raw_response_wrapper( + permissions.create, + ) + self.retrieve = to_raw_response_wrapper( + permissions.retrieve, + ) + self.delete = to_raw_response_wrapper( + permissions.delete, + ) + + +class AsyncPermissionsResourceWithRawResponse: + def __init__(self, permissions: AsyncPermissionsResource) -> None: + self._permissions = permissions + + self.create = async_to_raw_response_wrapper( + permissions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + permissions.retrieve, + ) + self.delete = async_to_raw_response_wrapper( + permissions.delete, + ) + + +class PermissionsResourceWithStreamingResponse: + def __init__(self, permissions: PermissionsResource) -> None: + self._permissions = permissions + + self.create = to_streamed_response_wrapper( + permissions.create, + ) + self.retrieve = to_streamed_response_wrapper( + permissions.retrieve, + ) + self.delete = to_streamed_response_wrapper( + permissions.delete, + ) + + +class AsyncPermissionsResourceWithStreamingResponse: + def __init__(self, permissions: AsyncPermissionsResource) -> None: + self._permissions = permissions + + self.create = async_to_streamed_response_wrapper( + permissions.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + permissions.retrieve, + ) + self.delete = async_to_streamed_response_wrapper( + permissions.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py new file mode 100644 index 00000000..8b4956b1 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
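+ + # Illustrative usage sketch: the aggregate resource below only wires its + # sub-resources together, so the endpoints in this package are reached + # through the client object. Assuming a configured client named `client` + # and hypothetical checkpoint/project identifiers: + # + #     client.fine_tuning.checkpoints.permissions.create( + #         "ft-ckpt-abc",             # hypothetical checkpoint identifier + #         project_ids=["proj-abc"],  # hypothetical project identifier + #     )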
+ +from __future__ import annotations + +from ..._compat import cached_property +from .jobs.jobs import ( + JobsResource, + AsyncJobsResource, + JobsResourceWithRawResponse, + AsyncJobsResourceWithRawResponse, + JobsResourceWithStreamingResponse, + AsyncJobsResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from .checkpoints.checkpoints import ( + CheckpointsResource, + AsyncCheckpointsResource, + CheckpointsResourceWithRawResponse, + AsyncCheckpointsResourceWithRawResponse, + CheckpointsResourceWithStreamingResponse, + AsyncCheckpointsResourceWithStreamingResponse, +) + +__all__ = ["FineTuningResource", "AsyncFineTuningResource"] + + +class FineTuningResource(SyncAPIResource): + @cached_property + def checkpoints(self) -> CheckpointsResource: + return CheckpointsResource(self._client) + + @cached_property + def jobs(self) -> JobsResource: + return JobsResource(self._client) + + @cached_property + def with_raw_response(self) -> FineTuningResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return FineTuningResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return FineTuningResourceWithStreamingResponse(self) + + +class AsyncFineTuningResource(AsyncAPIResource): + @cached_property + def checkpoints(self) -> AsyncCheckpointsResource: + return AsyncCheckpointsResource(self._client) + + @cached_property + def jobs(self) -> AsyncJobsResource: + return AsyncJobsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncFineTuningResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncFineTuningResourceWithStreamingResponse(self) + + +class FineTuningResourceWithRawResponse: + def __init__(self, fine_tuning: FineTuningResource) -> None: + self._fine_tuning = fine_tuning + + @cached_property + def checkpoints(self) -> CheckpointsResourceWithRawResponse: + return CheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints) + + @cached_property + def jobs(self) -> JobsResourceWithRawResponse: + return JobsResourceWithRawResponse(self._fine_tuning.jobs) + + +class AsyncFineTuningResourceWithRawResponse: + def __init__(self, fine_tuning: AsyncFineTuningResource) -> None: + self._fine_tuning = fine_tuning + + @cached_property + def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse: + return AsyncCheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints) + + @cached_property + def jobs(self) -> AsyncJobsResourceWithRawResponse: + return AsyncJobsResourceWithRawResponse(self._fine_tuning.jobs) + + +class FineTuningResourceWithStreamingResponse: + def __init__(self, fine_tuning: FineTuningResource) -> None: + self._fine_tuning = fine_tuning + + @cached_property + def checkpoints(self) -> CheckpointsResourceWithStreamingResponse: + return CheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints) + + @cached_property + def jobs(self) -> JobsResourceWithStreamingResponse: + return JobsResourceWithStreamingResponse(self._fine_tuning.jobs) + + +class AsyncFineTuningResourceWithStreamingResponse: + def __init__(self, fine_tuning: AsyncFineTuningResource) -> None: + self._fine_tuning = fine_tuning + + @cached_property + def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse: + return AsyncCheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints) + + @cached_property + def jobs(self) -> AsyncJobsResourceWithStreamingResponse: + return AsyncJobsResourceWithStreamingResponse(self._fine_tuning.jobs) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py new file mode 100644 index 00000000..90e643d7 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
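+ + # This module re-exports the jobs resource and its sub-resources. As an + # illustrative sketch (a configured `client` object and a hypothetical job + # identifier are assumed), they are normally reached through the client + # rather than imported from here directly: + # + #     client.fine_tuning.jobs.events.retrieve("ftjob-abc123", limit=20) + #     client.fine_tuning.jobs.checkpoints.retrieve("ftjob-abc123", limit=10)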
+ +from .jobs import ( + JobsResource, + AsyncJobsResource, + JobsResourceWithRawResponse, + AsyncJobsResourceWithRawResponse, + JobsResourceWithStreamingResponse, + AsyncJobsResourceWithStreamingResponse, +) +from .events import ( + EventsResource, + AsyncEventsResource, + EventsResourceWithRawResponse, + AsyncEventsResourceWithRawResponse, + EventsResourceWithStreamingResponse, + AsyncEventsResourceWithStreamingResponse, +) +from .checkpoints import ( + CheckpointsResource, + AsyncCheckpointsResource, + CheckpointsResourceWithRawResponse, + AsyncCheckpointsResourceWithRawResponse, + CheckpointsResourceWithStreamingResponse, + AsyncCheckpointsResourceWithStreamingResponse, +) + +__all__ = [ + "CheckpointsResource", + "AsyncCheckpointsResource", + "CheckpointsResourceWithRawResponse", + "AsyncCheckpointsResourceWithRawResponse", + "CheckpointsResourceWithStreamingResponse", + "AsyncCheckpointsResourceWithStreamingResponse", + "EventsResource", + "AsyncEventsResource", + "EventsResourceWithRawResponse", + "AsyncEventsResourceWithRawResponse", + "EventsResourceWithStreamingResponse", + "AsyncEventsResourceWithStreamingResponse", + "JobsResource", + "AsyncJobsResource", + "JobsResourceWithRawResponse", + "AsyncJobsResourceWithRawResponse", + "JobsResourceWithStreamingResponse", + "AsyncJobsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py new file mode 100644 index 00000000..d9ade070 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py @@ -0,0 +1,197 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.fine_tuning.jobs import checkpoint_retrieve_params +from ....types.fine_tuning.jobs.checkpoint_retrieve_response import CheckpointRetrieveResponse + +__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"] + + +class CheckpointsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CheckpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return CheckpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return CheckpointsResourceWithStreamingResponse(self) + + def retrieve( + self, + fine_tuning_job_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CheckpointRetrieveResponse: + """ + List checkpoints for a fine-tuning job. + + Args: + after: Identifier for the last checkpoint ID from the previous pagination request. + + limit: Number of checkpoints to retrieve. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + checkpoint_retrieve_params.CheckpointRetrieveParams, + ), + ), + cast_to=CheckpointRetrieveResponse, + ) + + +class AsyncCheckpointsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncCheckpointsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncCheckpointsResourceWithStreamingResponse(self) + + async def retrieve( + self, + fine_tuning_job_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CheckpointRetrieveResponse: + """ + List checkpoints for a fine-tuning job. + + Args: + after: Identifier for the last checkpoint ID from the previous pagination request. + + limit: Number of checkpoints to retrieve. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return await self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + checkpoint_retrieve_params.CheckpointRetrieveParams, + ), + ), + cast_to=CheckpointRetrieveResponse, + ) + + +class CheckpointsResourceWithRawResponse: + def __init__(self, checkpoints: CheckpointsResource) -> None: + self._checkpoints = checkpoints + + self.retrieve = to_raw_response_wrapper( + checkpoints.retrieve, + ) + + +class AsyncCheckpointsResourceWithRawResponse: + def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: + self._checkpoints = checkpoints + + self.retrieve = async_to_raw_response_wrapper( + checkpoints.retrieve, + ) + + +class CheckpointsResourceWithStreamingResponse: + def __init__(self, checkpoints: CheckpointsResource) -> None: + self._checkpoints = checkpoints + + self.retrieve = to_streamed_response_wrapper( + checkpoints.retrieve, + ) + + +class AsyncCheckpointsResourceWithStreamingResponse: + def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: + self._checkpoints = checkpoints + + self.retrieve = async_to_streamed_response_wrapper( + checkpoints.retrieve, + ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py new file mode 100644 index 00000000..6005084f --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py @@ -0,0 +1,197 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.fine_tuning.jobs import event_retrieve_params +from ....types.fine_tuning.jobs.event_retrieve_response import EventRetrieveResponse + +__all__ = ["EventsResource", "AsyncEventsResource"] + + +class EventsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EventsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return EventsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EventsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return EventsResourceWithStreamingResponse(self) + + def retrieve( + self, + fine_tuning_job_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EventRetrieveResponse: + """ + Get status updates for a fine-tuning job. + + Args: + after: Identifier for the last event from the previous pagination request. + + limit: Number of events to retrieve. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}/events", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + event_retrieve_params.EventRetrieveParams, + ), + ), + cast_to=EventRetrieveResponse, + ) + + +class AsyncEventsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEventsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncEventsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEventsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncEventsResourceWithStreamingResponse(self) + + async def retrieve( + self, + fine_tuning_job_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EventRetrieveResponse: + """ + Get status updates for a fine-tuning job. + + Args: + after: Identifier for the last event from the previous pagination request. + + limit: Number of events to retrieve. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return await self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}/events", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + event_retrieve_params.EventRetrieveParams, + ), + ), + cast_to=EventRetrieveResponse, + ) + + +class EventsResourceWithRawResponse: + def __init__(self, events: EventsResource) -> None: + self._events = events + + self.retrieve = to_raw_response_wrapper( + events.retrieve, + ) + + +class AsyncEventsResourceWithRawResponse: + def __init__(self, events: AsyncEventsResource) -> None: + self._events = events + + self.retrieve = async_to_raw_response_wrapper( + events.retrieve, + ) + + +class EventsResourceWithStreamingResponse: + def __init__(self, events: EventsResource) -> None: + self._events = events + + self.retrieve = to_streamed_response_wrapper( + events.retrieve, + ) + + +class AsyncEventsResourceWithStreamingResponse: + def __init__(self, events: AsyncEventsResource) -> None: + self._events = events + + self.retrieve = async_to_streamed_response_wrapper( + events.retrieve, + ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py new file mode 100644 index 00000000..86a7ae4b --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py @@ -0,0 +1,668 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
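+ + # Illustrative lifecycle sketch for the resource below (a configured + # `client` object and a hypothetical uploaded JSONL file ID are assumed): + # + #     job = client.fine_tuning.jobs.create( + #         model="gpt-4o-mini", + #         training_file="file-abc123",  # hypothetical training file ID + #     ) + #     job = client.fine_tuning.jobs.retrieve(job.id)  # poll job status + #     client.fine_tuning.jobs.cancel(job.id)          # or cancel the job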
+ +from __future__ import annotations + +from typing import Dict, Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from .events import ( + EventsResource, + AsyncEventsResource, + EventsResourceWithRawResponse, + AsyncEventsResourceWithRawResponse, + EventsResourceWithStreamingResponse, + AsyncEventsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from .checkpoints import ( + CheckpointsResource, + AsyncCheckpointsResource, + CheckpointsResourceWithRawResponse, + AsyncCheckpointsResourceWithRawResponse, + CheckpointsResourceWithStreamingResponse, + AsyncCheckpointsResourceWithStreamingResponse, +) +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.fine_tuning import job_list_params, job_create_params +from ....types.fine_tuning.fine_tuning_job import FineTuningJob +from ....types.fine_tuning.job_list_response import JobListResponse +from ....types.fine_tuning.fine_tune_method_param import FineTuneMethodParam + +__all__ = ["JobsResource", "AsyncJobsResource"] + + +class JobsResource(SyncAPIResource): + @cached_property + def checkpoints(self) -> CheckpointsResource: + return CheckpointsResource(self._client) + + @cached_property + def events(self) -> EventsResource: + return EventsResource(self._client) + + @cached_property + def with_raw_response(self) -> JobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return JobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> JobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return JobsResourceWithStreamingResponse(self) + + def create( + self, + *, + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], + training_file: str, + hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, + integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + method: FineTuneMethodParam | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + suffix: Optional[str] | NotGiven = NOT_GIVEN, + validation_file: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Creates a fine-tuning job which begins the process of creating a new model from + a given dataset. 
+ + The response includes details of the enqueued job, including the job status and + the name of the fine-tuned model once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + + Args: + model: The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + + training_file: The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload + your file with the purpose `fine-tune`. + + The contents of the file should differ depending on whether the model uses the + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) format, or whether + the fine-tuning method uses the + [preference](/docs/api-reference/fine-tuning/preference-input) format. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. + + integrations: A list of integrations to enable for your fine-tuning job. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + method: The method used for fine-tuning. + + seed: The seed controls the reproducibility of the job. Passing in the same seed and + job parameters should produce the same results, but may differ in rare cases. If + a seed is not specified, one will be generated for you. + + suffix: A string of up to 64 characters that will be added to your fine-tuned model + name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + + validation_file: The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics + periodically during fine-tuning. These metrics can be viewed in the fine-tuning + results file. The same data should not be present in both the training and validation + files. + + Your dataset must be formatted as a JSONL file. You must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/fine_tuning/jobs", + body=maybe_transform( + { + "model": model, + "training_file": training_file, + "hyperparameters": hyperparameters, + "integrations": integrations, + "metadata": metadata, + "method": method, + "seed": seed, + "suffix": suffix, + "validation_file": validation_file, + }, + job_create_params.JobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + def retrieve( + self, + fine_tuning_job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> JobListResponse: + """ + List your organization's fine-tuning jobs + + Args: + after: Identifier for the last job from the previous pagination request. + + limit: Number of fine-tuning jobs to retrieve. + + metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + Alternatively, set `metadata=null` to indicate no metadata. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/fine_tuning/jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + }, + job_list_params.JobListParams, + ), + ), + cast_to=JobListResponse, + ) + + def cancel( + self, + fine_tuning_job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Immediately cancel a fine-tune job. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return self._post( + f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + +class AsyncJobsResource(AsyncAPIResource): + @cached_property + def checkpoints(self) -> AsyncCheckpointsResource: + return AsyncCheckpointsResource(self._client) + + @cached_property + def events(self) -> AsyncEventsResource: + return AsyncEventsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncJobsResourceWithStreamingResponse(self) + + async def create( + self, + *, + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], + training_file: str, + hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, + integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + method: FineTuneMethodParam | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + suffix: Optional[str] | NotGiven = NOT_GIVEN, + validation_file: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Creates a fine-tuning job which begins the process of creating a new model from + a given dataset. + + The response includes details of the enqueued job, including the job status and + the name of the fine-tuned model once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + + Args: + model: The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + + training_file: The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload + your file with the purpose `fine-tune`. + + The contents of the file should differ depending on whether the model uses the + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) format, or whether + the fine-tuning method uses the + [preference](/docs/api-reference/fine-tuning/preference-input) format. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. + + integrations: A list of integrations to enable for your fine-tuning job. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + method: The method used for fine-tuning. + + seed: The seed controls the reproducibility of the job. Passing in the same seed and + job parameters should produce the same results, but may differ in rare cases. If + a seed is not specified, one will be generated for you. + + suffix: A string of up to 64 characters that will be added to your fine-tuned model + name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + + validation_file: The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics + periodically during fine-tuning. These metrics can be viewed in the fine-tuning + results file. The same data should not be present in both the training and validation + files. + + Your dataset must be formatted as a JSONL file. You must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/fine_tuning/jobs", + body=await async_maybe_transform( + { + "model": model, + "training_file": training_file, + "hyperparameters": hyperparameters, + "integrations": integrations, + "metadata": metadata, + "method": method, + "seed": seed, + "suffix": suffix, + "validation_file": validation_file, + }, + job_create_params.JobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + async def retrieve( + self, + fine_tuning_job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return await self._get( + f"/fine_tuning/jobs/{fine_tuning_job_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> JobListResponse: + """ + List your organization's fine-tuning jobs + + Args: + after: Identifier for the last job from the previous pagination request. + + limit: Number of fine-tuning jobs to retrieve. + + metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + Alternatively, set `metadata=null` to indicate no metadata. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/fine_tuning/jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "metadata": metadata, + }, + job_list_params.JobListParams, + ), + ), + cast_to=JobListResponse, + ) + + async def cancel( + self, + fine_tuning_job_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FineTuningJob: + """ + Immediately cancel a fine-tune job. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not fine_tuning_job_id: + raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") + return await self._post( + f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FineTuningJob, + ) + + +class JobsResourceWithRawResponse: + def __init__(self, jobs: JobsResource) -> None: + self._jobs = jobs + + self.create = to_raw_response_wrapper( + jobs.create, + ) + self.retrieve = to_raw_response_wrapper( + jobs.retrieve, + ) + self.list = to_raw_response_wrapper( + jobs.list, + ) + self.cancel = to_raw_response_wrapper( + jobs.cancel, + ) + + @cached_property + def checkpoints(self) -> CheckpointsResourceWithRawResponse: + return CheckpointsResourceWithRawResponse(self._jobs.checkpoints) + + @cached_property + def events(self) -> EventsResourceWithRawResponse: + return EventsResourceWithRawResponse(self._jobs.events) + + +class AsyncJobsResourceWithRawResponse: + def __init__(self, jobs: AsyncJobsResource) -> None: + self._jobs = jobs + + self.create = async_to_raw_response_wrapper( + jobs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + jobs.retrieve, + ) + self.list = async_to_raw_response_wrapper( + jobs.list, + ) + self.cancel = async_to_raw_response_wrapper( + jobs.cancel, + ) + + @cached_property + def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse: + return AsyncCheckpointsResourceWithRawResponse(self._jobs.checkpoints) + + @cached_property + def events(self) -> AsyncEventsResourceWithRawResponse: + return AsyncEventsResourceWithRawResponse(self._jobs.events) + + +class JobsResourceWithStreamingResponse: + def __init__(self, jobs: JobsResource) -> None: + self._jobs = jobs + + self.create = to_streamed_response_wrapper( + jobs.create, + ) + self.retrieve = to_streamed_response_wrapper( + jobs.retrieve, + ) + self.list = to_streamed_response_wrapper( + jobs.list, + ) + self.cancel = to_streamed_response_wrapper( + jobs.cancel, + ) + + @cached_property + def checkpoints(self) -> 
CheckpointsResourceWithStreamingResponse: + return CheckpointsResourceWithStreamingResponse(self._jobs.checkpoints) + + @cached_property + def events(self) -> EventsResourceWithStreamingResponse: + return EventsResourceWithStreamingResponse(self._jobs.events) + + +class AsyncJobsResourceWithStreamingResponse: + def __init__(self, jobs: AsyncJobsResource) -> None: + self._jobs = jobs + + self.create = async_to_streamed_response_wrapper( + jobs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + jobs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + jobs.list, + ) + self.cancel = async_to_streamed_response_wrapper( + jobs.cancel, + ) + + @cached_property + def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse: + return AsyncCheckpointsResourceWithStreamingResponse(self._jobs.checkpoints) + + @cached_property + def events(self) -> AsyncEventsResourceWithStreamingResponse: + return AsyncEventsResourceWithStreamingResponse(self._jobs.events) diff --git a/src/digitalocean_genai_sdk/resources/images.py b/src/digitalocean_genai_sdk/resources/images.py new file mode 100644 index 00000000..56a52184 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/images.py @@ -0,0 +1,592 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Mapping, Optional, cast +from typing_extensions import Literal + +import httpx + +from ..types import image_create_edit_params, image_create_variation_params, image_create_generation_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.images_response import ImagesResponse + +__all__ = ["ImagesResource", "AsyncImagesResource"] + + +class ImagesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ImagesResourceWithStreamingResponse(self) + + def create_edit( + self, + *, + image: FileTypes, + prompt: str, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an edited or extended image given an original image and a prompt. + + Args: + image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + is not provided, image must have transparency, which will be used as the mask. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "prompt": prompt, + "mask": mask, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/images/edits", + body=maybe_transform(body, image_create_edit_params.ImageCreateEditParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + def create_generation( + self, + *, + prompt: str, + model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an image given a prompt. + + Args: + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2` and 4000 characters for `dall-e-3`. + + model: The model to use for image generation. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + quality: The quality of the image that will be generated. `hd` creates images with finer + details and greater consistency across the image. This param is only supported + for `dall-e-3`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + `1024x1792` for `dall-e-3` models. + + style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid + causes the model to lean towards generating hyper-real and dramatic images. + Natural causes the model to produce more natural, less hyper-real looking + images. This param is only supported for `dall-e-3`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "model": model, + "n": n, + "quality": quality, + "response_format": response_format, + "size": size, + "style": style, + "user": user, + }, + image_create_generation_params.ImageCreateGenerationParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + def create_variation( + self, + *, + image: FileTypes, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates a variation of a given image. + + Args: + image: The image to use as the basis for the variation(s). Must be a valid PNG file, + less than 4MB, and square. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. 
+ + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/images/variations", + body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + +class AsyncImagesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncImagesResourceWithStreamingResponse(self) + + async def create_edit( + self, + *, + image: FileTypes, + prompt: str, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an edited or extended image given an original image and a prompt. + + Args: + image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask + is not provided, image must have transparency, which will be used as the mask. + + prompt: A text description of the desired image(s). 
The maximum length is 1000 + characters. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "prompt": prompt, + "mask": mask, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/images/edits", + body=await async_maybe_transform(body, image_create_edit_params.ImageCreateEditParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + async def create_generation( + self, + *, + prompt: str, + model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an image given a prompt. + + Args: + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2` and 4000 characters for `dall-e-3`. + + model: The model to use for image generation. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + quality: The quality of the image that will be generated. `hd` creates images with finer + details and greater consistency across the image. This param is only supported + for `dall-e-3`. 
+ + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + `1024x1792` for `dall-e-3` models. + + style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid + causes the model to lean towards generating hyper-real and dramatic images. + Natural causes the model to produce more natural, less hyper-real looking + images. This param is only supported for `dall-e-3`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/images/generations", + body=await async_maybe_transform( + { + "prompt": prompt, + "model": model, + "n": n, + "quality": quality, + "response_format": response_format, + "size": size, + "style": style, + "user": user, + }, + image_create_generation_params.ImageCreateGenerationParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + async def create_variation( + self, + *, + image: FileTypes, + model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates a variation of a given image. + + Args: + image: The image to use as the basis for the variation(s). Must be a valid PNG file, + less than 4MB, and square. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/images/variations", + body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create_edit = to_raw_response_wrapper( + images.create_edit, + ) + self.create_generation = to_raw_response_wrapper( + images.create_generation, + ) + self.create_variation = to_raw_response_wrapper( + images.create_variation, + ) + + +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create_edit = async_to_raw_response_wrapper( + images.create_edit, + ) + self.create_generation = async_to_raw_response_wrapper( + images.create_generation, + ) + self.create_variation = async_to_raw_response_wrapper( + images.create_variation, + ) + + +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create_edit = to_streamed_response_wrapper( + images.create_edit, + ) + self.create_generation = to_streamed_response_wrapper( + images.create_generation, + ) + self.create_variation = to_streamed_response_wrapper( + images.create_variation, + ) + + +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create_edit = async_to_streamed_response_wrapper( + images.create_edit, + ) + self.create_generation = async_to_streamed_response_wrapper( + images.create_generation, + ) + self.create_variation = async_to_streamed_response_wrapper( + images.create_variation, + ) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py new file mode 100644 index 00000000..53775057 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/models.py @@ -0,0 +1,305 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
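+
+# A minimal usage sketch covering the image endpoints defined above and the
+# model endpoints defined below. The client class name `DigitaloceanGenaiSDK`
+# and its `api_key` constructor argument are assumptions inferred from the
+# package name, not confirmed by this diff:
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="...")
+#
+#     # Generate an image, then request variations of a stored PNG.
+#     generated = client.images.create_generation(prompt="a red panda", size="1024x1024")
+#     with open("panda.png", "rb") as f:
+#         variants = client.images.create_variation(image=f, n=2)
+#
+#     # Model management mirrors the endpoints in this module; delete applies
+#     # to fine-tuned models only.
+#     listing = client.models.list()
+#     model = client.models.retrieve("model-id")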
+ +from __future__ import annotations + +import httpx + +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..types.model import Model +from .._base_client import make_request_options +from ..types.model_list_response import ModelListResponse +from ..types.model_delete_response import ModelDeleteResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ModelsResourceWithStreamingResponse(self) + + def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Model: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return self._get( + f"/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Model, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return self._get( + "/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + def delete( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelDeleteResponse: + """Delete a fine-tuned model. + + You must have the Owner role in your organization to + delete a model. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return self._delete( + f"/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelDeleteResponse, + ) + + +class AsyncModelsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncModelsResourceWithStreamingResponse(self) + + async def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Model: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return await self._get( + f"/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Model, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return await self._get( + "/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + async def delete( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelDeleteResponse: + """Delete a fine-tuned model. + + You must have the Owner role in your organization to + delete a model. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return await self._delete( + f"/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelDeleteResponse, + ) + + +class ModelsResourceWithRawResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) + self.list = to_raw_response_wrapper( + models.list, + ) + self.delete = to_raw_response_wrapper( + models.delete, + ) + + +class AsyncModelsResourceWithRawResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) + self.list = async_to_raw_response_wrapper( + models.list, + ) + self.delete = async_to_raw_response_wrapper( + models.delete, + ) + + +class ModelsResourceWithStreamingResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) + self.list = to_streamed_response_wrapper( + models.list, + ) + self.delete = to_streamed_response_wrapper( + models.delete, + ) + + +class AsyncModelsResourceWithStreamingResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_streamed_response_wrapper( + models.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + models.list, + ) + self.delete = async_to_streamed_response_wrapper( + models.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/moderations.py b/src/digitalocean_genai_sdk/resources/moderations.py new file mode 100644 index 00000000..e9404243 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/moderations.py @@ -0,0 +1,216 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
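+
+# A minimal usage sketch for the moderation endpoint defined below, reusing
+# the assumed `DigitaloceanGenaiSDK` client from the sketch in models.py:
+#
+#     result = client.moderations.classify(
+#         input="some user-provided text",
+#         model="omni-moderation-latest",
+#     )
+#     # `result` is parsed into ModerationClassifyResponse (see ../types/).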
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal + +import httpx + +from ..types import moderation_classify_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.moderation_classify_response import ModerationClassifyResponse + +__all__ = ["ModerationsResource", "AsyncModerationsResource"] + + +class ModerationsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ModerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ModerationsResourceWithStreamingResponse(self) + + def classify( + self, + *, + input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]], + model: Union[ + str, + Literal[ + "omni-moderation-latest", + "omni-moderation-2024-09-26", + "text-moderation-latest", + "text-moderation-stable", + ], + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModerationClassifyResponse: + """Classifies if text and/or image inputs are potentially harmful. + + Learn more in + the [moderation guide](/docs/guides/moderation). + + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. + + model: The content moderation model you would like to use. Learn more in + [the moderation guide](/docs/guides/moderation), and learn about available + models [here](/docs/models#moderation). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/moderations", + body=maybe_transform( + { + "input": input, + "model": model, + }, + moderation_classify_params.ModerationClassifyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModerationClassifyResponse, + ) + + +class AsyncModerationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncModerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncModerationsResourceWithStreamingResponse(self) + + async def classify( + self, + *, + input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]], + model: Union[ + str, + Literal[ + "omni-moderation-latest", + "omni-moderation-2024-09-26", + "text-moderation-latest", + "text-moderation-stable", + ], + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModerationClassifyResponse: + """Classifies if text and/or image inputs are potentially harmful. + + Learn more in + the [moderation guide](/docs/guides/moderation). + + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. + + model: The content moderation model you would like to use. Learn more in + [the moderation guide](/docs/guides/moderation), and learn about available + models [here](/docs/models#moderation). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/moderations", + body=await async_maybe_transform( + { + "input": input, + "model": model, + }, + moderation_classify_params.ModerationClassifyParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModerationClassifyResponse, + ) + + +class ModerationsResourceWithRawResponse: + def __init__(self, moderations: ModerationsResource) -> None: + self._moderations = moderations + + self.classify = to_raw_response_wrapper( + moderations.classify, + ) + + +class AsyncModerationsResourceWithRawResponse: + def __init__(self, moderations: AsyncModerationsResource) -> None: + self._moderations = moderations + + self.classify = async_to_raw_response_wrapper( + moderations.classify, + ) + + +class ModerationsResourceWithStreamingResponse: + def __init__(self, moderations: ModerationsResource) -> None: + self._moderations = moderations + + self.classify = to_streamed_response_wrapper( + moderations.classify, + ) + + +class AsyncModerationsResourceWithStreamingResponse: + def __init__(self, moderations: AsyncModerationsResource) -> None: + self._moderations = moderations + + self.classify = async_to_streamed_response_wrapper( + moderations.classify, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/__init__.py b/src/digitalocean_genai_sdk/resources/organization/__init__.py new file mode 100644 index 00000000..cf206d71 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/__init__.py @@ -0,0 +1,89 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
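+
+# These re-exports define how the organization sub-resources hang off the
+# client, e.g. (attribute names inferred from the module names; a sketch,
+# not confirmed by this diff):
+#
+#     client.organization.admin_api_keys.list()
+#     client.organization.invites.create(email="user@example.com", role="reader")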
+ +from .usage import ( + UsageResource, + AsyncUsageResource, + UsageResourceWithRawResponse, + AsyncUsageResourceWithRawResponse, + UsageResourceWithStreamingResponse, + AsyncUsageResourceWithStreamingResponse, +) +from .users import ( + UsersResource, + AsyncUsersResource, + UsersResourceWithRawResponse, + AsyncUsersResourceWithRawResponse, + UsersResourceWithStreamingResponse, + AsyncUsersResourceWithStreamingResponse, +) +from .invites import ( + InvitesResource, + AsyncInvitesResource, + InvitesResourceWithRawResponse, + AsyncInvitesResourceWithRawResponse, + InvitesResourceWithStreamingResponse, + AsyncInvitesResourceWithStreamingResponse, +) +from .projects import ( + ProjectsResource, + AsyncProjectsResource, + ProjectsResourceWithRawResponse, + AsyncProjectsResourceWithRawResponse, + ProjectsResourceWithStreamingResponse, + AsyncProjectsResourceWithStreamingResponse, +) +from .organization import ( + OrganizationResource, + AsyncOrganizationResource, + OrganizationResourceWithRawResponse, + AsyncOrganizationResourceWithRawResponse, + OrganizationResourceWithStreamingResponse, + AsyncOrganizationResourceWithStreamingResponse, +) +from .admin_api_keys import ( + AdminAPIKeysResource, + AsyncAdminAPIKeysResource, + AdminAPIKeysResourceWithRawResponse, + AsyncAdminAPIKeysResourceWithRawResponse, + AdminAPIKeysResourceWithStreamingResponse, + AsyncAdminAPIKeysResourceWithStreamingResponse, +) + +__all__ = [ + "AdminAPIKeysResource", + "AsyncAdminAPIKeysResource", + "AdminAPIKeysResourceWithRawResponse", + "AsyncAdminAPIKeysResourceWithRawResponse", + "AdminAPIKeysResourceWithStreamingResponse", + "AsyncAdminAPIKeysResourceWithStreamingResponse", + "InvitesResource", + "AsyncInvitesResource", + "InvitesResourceWithRawResponse", + "AsyncInvitesResourceWithRawResponse", + "InvitesResourceWithStreamingResponse", + "AsyncInvitesResourceWithStreamingResponse", + "ProjectsResource", + "AsyncProjectsResource", + "ProjectsResourceWithRawResponse", + "AsyncProjectsResourceWithRawResponse", + "ProjectsResourceWithStreamingResponse", + "AsyncProjectsResourceWithStreamingResponse", + "UsageResource", + "AsyncUsageResource", + "UsageResourceWithRawResponse", + "AsyncUsageResourceWithRawResponse", + "UsageResourceWithStreamingResponse", + "AsyncUsageResourceWithStreamingResponse", + "UsersResource", + "AsyncUsersResource", + "UsersResourceWithRawResponse", + "AsyncUsersResourceWithRawResponse", + "UsersResourceWithStreamingResponse", + "AsyncUsersResourceWithStreamingResponse", + "OrganizationResource", + "AsyncOrganizationResource", + "OrganizationResourceWithRawResponse", + "AsyncOrganizationResourceWithRawResponse", + "OrganizationResourceWithStreamingResponse", + "AsyncOrganizationResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py new file mode 100644 index 00000000..7224871f --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py @@ -0,0 +1,444 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
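+
+# A minimal CRUD sketch for the admin API key endpoints defined below,
+# reusing the assumed `DigitaloceanGenaiSDK` client from earlier sketches
+# (the `id` attribute on the returned `AdminAPIKey` is an assumption):
+#
+#     key = client.organization.admin_api_keys.create(name="ci-key")
+#     client.organization.admin_api_keys.retrieve(key.id)
+#     page = client.organization.admin_api_keys.list(limit=20, order="desc")
+#     client.organization.admin_api_keys.delete(key.id)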
+ +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.organization import admin_api_key_list_params, admin_api_key_create_params +from ...types.organization.admin_api_key import AdminAPIKey +from ...types.organization.admin_api_key_list_response import AdminAPIKeyListResponse +from ...types.organization.admin_api_key_delete_response import AdminAPIKeyDeleteResponse + +__all__ = ["AdminAPIKeysResource", "AsyncAdminAPIKeysResource"] + + +class AdminAPIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AdminAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AdminAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AdminAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AdminAPIKeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKey: + """ + Create a new admin-level API key for the organization. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/organization/admin_api_keys", + body=maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKey, + ) + + def retrieve( + self, + key_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKey: + """ + Get details for a specific organization API key by its ID. + + Args: + key_id: The ID of the API key. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return self._get( + f"/organization/admin_api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKey, + ) + + def list( + self, + *, + after: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKeyListResponse: + """ + Retrieve a paginated list of organization admin API keys. + + Args: + after: Return keys with IDs that come after this ID in the pagination order. + + limit: Maximum number of keys to return. + + order: Order results by creation time, ascending or descending. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/admin_api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + admin_api_key_list_params.AdminAPIKeyListParams, + ), + ), + cast_to=AdminAPIKeyListResponse, + ) + + def delete( + self, + key_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKeyDeleteResponse: + """ + Delete the specified admin API key. + + Args: + key_id: The ID of the API key to be deleted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return self._delete( + f"/organization/admin_api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKeyDeleteResponse, + ) + + +class AsyncAdminAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAdminAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAdminAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAdminAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKey: + """ + Create a new admin-level API key for the organization. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/organization/admin_api_keys", + body=await async_maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKey, + ) + + async def retrieve( + self, + key_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKey: + """ + Get details for a specific organization API key by its ID. + + Args: + key_id: The ID of the API key. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return await self._get( + f"/organization/admin_api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKey, + ) + + async def list( + self, + *, + after: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKeyListResponse: + """ + Retrieve a paginated list of organization admin API keys. 
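+
+        To fetch every key, issue repeated calls and pass the last seen ID as
+        `after`. A sketch (the `data`, `has_more`, and `last_id` attributes on
+        the response are assumptions from the list-response schema, not
+        confirmed by this diff):
+
+            page = await client.organization.admin_api_keys.list(limit=100)
+            keys = list(page.data)
+            while page.has_more:
+                page = await client.organization.admin_api_keys.list(
+                    limit=100, after=page.last_id
+                )
+                keys.extend(page.data)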
+ + Args: + after: Return keys with IDs that come after this ID in the pagination order. + + limit: Maximum number of keys to return. + + order: Order results by creation time, ascending or descending. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/admin_api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + }, + admin_api_key_list_params.AdminAPIKeyListParams, + ), + ), + cast_to=AdminAPIKeyListResponse, + ) + + async def delete( + self, + key_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AdminAPIKeyDeleteResponse: + """ + Delete the specified admin API key. + + Args: + key_id: The ID of the API key to be deleted. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return await self._delete( + f"/organization/admin_api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AdminAPIKeyDeleteResponse, + ) + + +class AdminAPIKeysResourceWithRawResponse: + def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None: + self._admin_api_keys = admin_api_keys + + self.create = to_raw_response_wrapper( + admin_api_keys.create, + ) + self.retrieve = to_raw_response_wrapper( + admin_api_keys.retrieve, + ) + self.list = to_raw_response_wrapper( + admin_api_keys.list, + ) + self.delete = to_raw_response_wrapper( + admin_api_keys.delete, + ) + + +class AsyncAdminAPIKeysResourceWithRawResponse: + def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None: + self._admin_api_keys = admin_api_keys + + self.create = async_to_raw_response_wrapper( + admin_api_keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + admin_api_keys.retrieve, + ) + self.list = async_to_raw_response_wrapper( + admin_api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + admin_api_keys.delete, + ) + + +class AdminAPIKeysResourceWithStreamingResponse: + def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None: + self._admin_api_keys = admin_api_keys + + self.create = to_streamed_response_wrapper( + admin_api_keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + admin_api_keys.retrieve, + ) + self.list = to_streamed_response_wrapper( + admin_api_keys.list, + ) + self.delete = to_streamed_response_wrapper( + admin_api_keys.delete, + ) + + +class AsyncAdminAPIKeysResourceWithStreamingResponse: + def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None: + 
self._admin_api_keys = admin_api_keys + + self.create = async_to_streamed_response_wrapper( + admin_api_keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + admin_api_keys.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + admin_api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + admin_api_keys.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/invites.py b/src/digitalocean_genai_sdk/resources/organization/invites.py new file mode 100644 index 00000000..16bd17bc --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/invites.py @@ -0,0 +1,476 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.organization import invite_list_params, invite_create_params +from ...types.organization.invite import Invite +from ...types.organization.invite_list_response import InviteListResponse +from ...types.organization.invite_delete_response import InviteDeleteResponse + +__all__ = ["InvitesResource", "AsyncInvitesResource"] + + +class InvitesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> InvitesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return InvitesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InvitesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return InvitesResourceWithStreamingResponse(self) + + def create( + self, + *, + email: str, + role: Literal["reader", "owner"], + projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Invite: + """Create an invite for a user to the organization. + + The invite must be accepted by + the user before they have access to the organization. + + Args: + email: Send an email to this address + + role: `owner` or `reader` + + projects: An array of projects to which membership is granted at the same time the org + invite is accepted. If omitted, the user will be invited to the default project + for compatibility with legacy behavior. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/organization/invites", + body=maybe_transform( + { + "email": email, + "role": role, + "projects": projects, + }, + invite_create_params.InviteCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Invite, + ) + + def retrieve( + self, + invite_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Invite: + """ + Retrieves an invite. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not invite_id: + raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") + return self._get( + f"/organization/invites/{invite_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Invite, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> InviteListResponse: + """ + Returns a list of invites in the organization. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/invites", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + invite_list_params.InviteListParams, + ), + ), + cast_to=InviteListResponse, + ) + + def delete( + self, + invite_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> InviteDeleteResponse: + """Delete an invite. + + If the invite has already been accepted, it cannot be deleted. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not invite_id: + raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") + return self._delete( + f"/organization/invites/{invite_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InviteDeleteResponse, + ) + + +class AsyncInvitesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncInvitesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncInvitesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInvitesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncInvitesResourceWithStreamingResponse(self) + + async def create( + self, + *, + email: str, + role: Literal["reader", "owner"], + projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Invite: + """Create an invite for a user to the organization. + + The invite must be accepted by + the user before they have access to the organization. + + Args: + email: Send an email to this address + + role: `owner` or `reader` + + projects: An array of projects to which membership is granted at the same time the org + invite is accepted. If omitted, the user will be invited to the default project + for compatibility with legacy behavior. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/organization/invites", + body=await async_maybe_transform( + { + "email": email, + "role": role, + "projects": projects, + }, + invite_create_params.InviteCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Invite, + ) + + async def retrieve( + self, + invite_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Invite: + """ + Retrieves an invite. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not invite_id: + raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") + return await self._get( + f"/organization/invites/{invite_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Invite, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> InviteListResponse: + """ + Returns a list of invites in the organization. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/invites", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + invite_list_params.InviteListParams, + ), + ), + cast_to=InviteListResponse, + ) + + async def delete( + self, + invite_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> InviteDeleteResponse: + """Delete an invite. + + If the invite has already been accepted, it cannot be deleted. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not invite_id: + raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") + return await self._delete( + f"/organization/invites/{invite_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=InviteDeleteResponse, + ) + + +class InvitesResourceWithRawResponse: + def __init__(self, invites: InvitesResource) -> None: + self._invites = invites + + self.create = to_raw_response_wrapper( + invites.create, + ) + self.retrieve = to_raw_response_wrapper( + invites.retrieve, + ) + self.list = to_raw_response_wrapper( + invites.list, + ) + self.delete = to_raw_response_wrapper( + invites.delete, + ) + + +class AsyncInvitesResourceWithRawResponse: + def __init__(self, invites: AsyncInvitesResource) -> None: + self._invites = invites + + self.create = async_to_raw_response_wrapper( + invites.create, + ) + self.retrieve = async_to_raw_response_wrapper( + invites.retrieve, + ) + self.list = async_to_raw_response_wrapper( + invites.list, + ) + self.delete = async_to_raw_response_wrapper( + invites.delete, + ) + + +class InvitesResourceWithStreamingResponse: + def __init__(self, invites: InvitesResource) -> None: + self._invites = invites + + self.create = to_streamed_response_wrapper( + invites.create, + ) + self.retrieve = to_streamed_response_wrapper( + invites.retrieve, + ) + self.list = to_streamed_response_wrapper( + invites.list, + ) + self.delete = to_streamed_response_wrapper( + invites.delete, + ) + + +class AsyncInvitesResourceWithStreamingResponse: + def __init__(self, invites: AsyncInvitesResource) -> None: + self._invites = invites + + self.create = async_to_streamed_response_wrapper( + invites.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + invites.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + invites.list, + ) + self.delete = async_to_streamed_response_wrapper( + invites.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/organization.py b/src/digitalocean_genai_sdk/resources/organization/organization.py new file mode 100644 index 00000000..4a9aa4fb --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/organization.py @@ -0,0 +1,586 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
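+
+# A brief usage sketch for the organization resources defined below. This is
+# an illustrative comment only: it assumes the generated client class is named
+# `DigitaloceanGenaiSDK` (exported by `_client.py`) and uses placeholder values
+# for the API key and the start time.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="<admin-api-key>")
+#     # Daily cost buckets starting at a Unix timestamp, up to 7 buckets.
+#     costs = client.organization.get_costs(start_time=1_700_000_000, limit=7)
+#     # Audit log entries for project-creation events.
+#     logs = client.organization.list_audit_logs(event_types=["project.created"])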
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from .usage import ( + UsageResource, + AsyncUsageResource, + UsageResourceWithRawResponse, + AsyncUsageResourceWithRawResponse, + UsageResourceWithStreamingResponse, + AsyncUsageResourceWithStreamingResponse, +) +from .users import ( + UsersResource, + AsyncUsersResource, + UsersResourceWithRawResponse, + AsyncUsersResourceWithRawResponse, + UsersResourceWithStreamingResponse, + AsyncUsersResourceWithStreamingResponse, +) +from ...types import organization_get_costs_params, organization_list_audit_logs_params +from .invites import ( + InvitesResource, + AsyncInvitesResource, + InvitesResourceWithRawResponse, + AsyncInvitesResourceWithRawResponse, + InvitesResourceWithStreamingResponse, + AsyncInvitesResourceWithStreamingResponse, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from .admin_api_keys import ( + AdminAPIKeysResource, + AsyncAdminAPIKeysResource, + AdminAPIKeysResourceWithRawResponse, + AsyncAdminAPIKeysResourceWithRawResponse, + AdminAPIKeysResourceWithStreamingResponse, + AsyncAdminAPIKeysResourceWithStreamingResponse, +) +from .projects.projects import ( + ProjectsResource, + AsyncProjectsResource, + ProjectsResourceWithRawResponse, + AsyncProjectsResourceWithRawResponse, + ProjectsResourceWithStreamingResponse, + AsyncProjectsResourceWithStreamingResponse, +) +from ...types.usage_response import UsageResponse +from ...types.audit_log_event_type import AuditLogEventType +from ...types.organization_list_audit_logs_response import OrganizationListAuditLogsResponse + +__all__ = ["OrganizationResource", "AsyncOrganizationResource"] + + +class OrganizationResource(SyncAPIResource): + @cached_property + def admin_api_keys(self) -> AdminAPIKeysResource: + return AdminAPIKeysResource(self._client) + + @cached_property + def invites(self) -> InvitesResource: + return InvitesResource(self._client) + + @cached_property + def projects(self) -> ProjectsResource: + return ProjectsResource(self._client) + + @cached_property + def usage(self) -> UsageResource: + return UsageResource(self._client) + + @cached_property + def users(self) -> UsersResource: + return UsersResource(self._client) + + @cached_property + def with_raw_response(self) -> OrganizationResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return OrganizationResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OrganizationResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return OrganizationResourceWithStreamingResponse(self) + + def get_costs( + self, + *, + start_time: int, + bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get cost details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in the response. Currently only `1d` is supported; defaults + to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the costs by the specified fields. Supported fields include `project_id`, + `line_item`, and any combination of them. + + limit: A limit on the number of buckets to be returned. Limit can range between 1 and + 180, and the default is 7. + + page: A cursor for use in pagination, corresponding to the `next_page` field from the + previous response. + + project_ids: Return only costs for these projects. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/costs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + organization_get_costs_params.OrganizationGetCostsParams, + ), + ), + cast_to=UsageResponse, + ) + + def list_audit_logs( + self, + *, + actor_emails: List[str] | NotGiven = NOT_GIVEN, + actor_ids: List[str] | NotGiven = NOT_GIVEN, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN, + event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + resource_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationListAuditLogsResponse: + """ + List user actions and configuration changes within this organization. + + Args: + actor_emails: Return only events performed by users with these emails.
+ + actor_ids: Return only events performed by these actors. Can be a user ID, a service + account ID, or an API key tracking ID. + + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + effective_at: Return only events whose `effective_at` (Unix seconds) is in this range. + + event_types: Return only events with a `type` in one of these values. For example, + `project.created`. For all options, see the documentation for the + [audit log object](/docs/api-reference/audit-logs/object). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + project_ids: Return only events for these projects. + + resource_ids: Return only events performed on these targets. For example, a project ID + that was updated. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/audit_logs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "actor_emails": actor_emails, + "actor_ids": actor_ids, + "after": after, + "before": before, + "effective_at": effective_at, + "event_types": event_types, + "limit": limit, + "project_ids": project_ids, + "resource_ids": resource_ids, + }, + organization_list_audit_logs_params.OrganizationListAuditLogsParams, + ), + ), + cast_to=OrganizationListAuditLogsResponse, + ) + + +class AsyncOrganizationResource(AsyncAPIResource): + @cached_property + def admin_api_keys(self) -> AsyncAdminAPIKeysResource: + return AsyncAdminAPIKeysResource(self._client) + + @cached_property + def invites(self) -> AsyncInvitesResource: + return AsyncInvitesResource(self._client) + + @cached_property + def projects(self) -> AsyncProjectsResource: + return AsyncProjectsResource(self._client) + + @cached_property + def usage(self) -> AsyncUsageResource: + return AsyncUsageResource(self._client) + + @cached_property + def users(self) -> AsyncUsersResource: + return AsyncUsersResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOrganizationResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncOrganizationResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOrganizationResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncOrganizationResourceWithStreamingResponse(self) + + async def get_costs( + self, + *, + start_time: int, + bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get cost details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in the response. Currently only `1d` is supported; defaults + to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the costs by the specified fields. Supported fields include `project_id`, + `line_item`, and any combination of them. + + limit: A limit on the number of buckets to be returned. Limit can range between 1 and + 180, and the default is 7. + + page: A cursor for use in pagination, corresponding to the `next_page` field from the + previous response. + + project_ids: Return only costs for these projects. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/costs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + organization_get_costs_params.OrganizationGetCostsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def list_audit_logs( + self, + *, + actor_emails: List[str] | NotGiven = NOT_GIVEN, + actor_ids: List[str] | NotGiven = NOT_GIVEN, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN, + event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + resource_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationListAuditLogsResponse: + """ + List user actions and configuration changes within this organization.
+ + Args: + actor_emails: Return only events performed by users with these emails. + + actor_ids: Return only events performed by these actors. Can be a user ID, a service + account ID, or an API key tracking ID. + + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + effective_at: Return only events whose `effective_at` (Unix seconds) is in this range. + + event_types: Return only events with a `type` in one of these values. For example, + `project.created`. For all options, see the documentation for the + [audit log object](/docs/api-reference/audit-logs/object). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + project_ids: Return only events for these projects. + + resource_ids: Return only events performed on these targets. For example, a project ID + that was updated. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/audit_logs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "actor_emails": actor_emails, + "actor_ids": actor_ids, + "after": after, + "before": before, + "effective_at": effective_at, + "event_types": event_types, + "limit": limit, + "project_ids": project_ids, + "resource_ids": resource_ids, + }, + organization_list_audit_logs_params.OrganizationListAuditLogsParams, + ), + ), + cast_to=OrganizationListAuditLogsResponse, + ) + + +class OrganizationResourceWithRawResponse: + def __init__(self, organization: OrganizationResource) -> None: + self._organization = organization + + self.get_costs = to_raw_response_wrapper( + organization.get_costs, + ) + self.list_audit_logs = to_raw_response_wrapper( + organization.list_audit_logs, + ) + + @cached_property + def admin_api_keys(self) -> AdminAPIKeysResourceWithRawResponse: + return AdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys) + + @cached_property + def invites(self) -> InvitesResourceWithRawResponse: + return InvitesResourceWithRawResponse(self._organization.invites) + + @cached_property + def projects(self) -> ProjectsResourceWithRawResponse: + return ProjectsResourceWithRawResponse(self._organization.projects) + + @cached_property + def usage(self) -> UsageResourceWithRawResponse: + return UsageResourceWithRawResponse(self._organization.usage) + + @cached_property + def users(self) -> UsersResourceWithRawResponse: + return UsersResourceWithRawResponse(self._organization.users) + + +class AsyncOrganizationResourceWithRawResponse: + def __init__(self, organization: AsyncOrganizationResource) -> None: + self._organization = organization + + self.get_costs = async_to_raw_response_wrapper( + organization.get_costs, + ) + self.list_audit_logs =
async_to_raw_response_wrapper( + organization.list_audit_logs, + ) + + @cached_property + def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithRawResponse: + return AsyncAdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys) + + @cached_property + def invites(self) -> AsyncInvitesResourceWithRawResponse: + return AsyncInvitesResourceWithRawResponse(self._organization.invites) + + @cached_property + def projects(self) -> AsyncProjectsResourceWithRawResponse: + return AsyncProjectsResourceWithRawResponse(self._organization.projects) + + @cached_property + def usage(self) -> AsyncUsageResourceWithRawResponse: + return AsyncUsageResourceWithRawResponse(self._organization.usage) + + @cached_property + def users(self) -> AsyncUsersResourceWithRawResponse: + return AsyncUsersResourceWithRawResponse(self._organization.users) + + +class OrganizationResourceWithStreamingResponse: + def __init__(self, organization: OrganizationResource) -> None: + self._organization = organization + + self.get_costs = to_streamed_response_wrapper( + organization.get_costs, + ) + self.list_audit_logs = to_streamed_response_wrapper( + organization.list_audit_logs, + ) + + @cached_property + def admin_api_keys(self) -> AdminAPIKeysResourceWithStreamingResponse: + return AdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys) + + @cached_property + def invites(self) -> InvitesResourceWithStreamingResponse: + return InvitesResourceWithStreamingResponse(self._organization.invites) + + @cached_property + def projects(self) -> ProjectsResourceWithStreamingResponse: + return ProjectsResourceWithStreamingResponse(self._organization.projects) + + @cached_property + def usage(self) -> UsageResourceWithStreamingResponse: + return UsageResourceWithStreamingResponse(self._organization.usage) + + @cached_property + def users(self) -> UsersResourceWithStreamingResponse: + return UsersResourceWithStreamingResponse(self._organization.users) + + +class AsyncOrganizationResourceWithStreamingResponse: + def __init__(self, organization: AsyncOrganizationResource) -> None: + self._organization = organization + + self.get_costs = async_to_streamed_response_wrapper( + organization.get_costs, + ) + self.list_audit_logs = async_to_streamed_response_wrapper( + organization.list_audit_logs, + ) + + @cached_property + def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse: + return AsyncAdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys) + + @cached_property + def invites(self) -> AsyncInvitesResourceWithStreamingResponse: + return AsyncInvitesResourceWithStreamingResponse(self._organization.invites) + + @cached_property + def projects(self) -> AsyncProjectsResourceWithStreamingResponse: + return AsyncProjectsResourceWithStreamingResponse(self._organization.projects) + + @cached_property + def usage(self) -> AsyncUsageResourceWithStreamingResponse: + return AsyncUsageResourceWithStreamingResponse(self._organization.usage) + + @cached_property + def users(self) -> AsyncUsersResourceWithStreamingResponse: + return AsyncUsersResourceWithStreamingResponse(self._organization.users) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py b/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py new file mode 100644 index 00000000..f3ceec3b --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py @@ -0,0 +1,75 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from .users import ( + UsersResource, + AsyncUsersResource, + UsersResourceWithRawResponse, + AsyncUsersResourceWithRawResponse, + UsersResourceWithStreamingResponse, + AsyncUsersResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .projects import ( + ProjectsResource, + AsyncProjectsResource, + ProjectsResourceWithRawResponse, + AsyncProjectsResourceWithRawResponse, + ProjectsResourceWithStreamingResponse, + AsyncProjectsResourceWithStreamingResponse, +) +from .rate_limits import ( + RateLimitsResource, + AsyncRateLimitsResource, + RateLimitsResourceWithRawResponse, + AsyncRateLimitsResourceWithRawResponse, + RateLimitsResourceWithStreamingResponse, + AsyncRateLimitsResourceWithStreamingResponse, +) +from .service_accounts import ( + ServiceAccountsResource, + AsyncServiceAccountsResource, + ServiceAccountsResourceWithRawResponse, + AsyncServiceAccountsResourceWithRawResponse, + ServiceAccountsResourceWithStreamingResponse, + AsyncServiceAccountsResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", + "RateLimitsResource", + "AsyncRateLimitsResource", + "RateLimitsResourceWithRawResponse", + "AsyncRateLimitsResourceWithRawResponse", + "RateLimitsResourceWithStreamingResponse", + "AsyncRateLimitsResourceWithStreamingResponse", + "ServiceAccountsResource", + "AsyncServiceAccountsResource", + "ServiceAccountsResourceWithRawResponse", + "AsyncServiceAccountsResourceWithRawResponse", + "ServiceAccountsResourceWithStreamingResponse", + "AsyncServiceAccountsResourceWithStreamingResponse", + "UsersResource", + "AsyncUsersResource", + "UsersResourceWithRawResponse", + "AsyncUsersResourceWithRawResponse", + "UsersResourceWithStreamingResponse", + "AsyncUsersResourceWithStreamingResponse", + "ProjectsResource", + "AsyncProjectsResource", + "ProjectsResourceWithRawResponse", + "AsyncProjectsResourceWithRawResponse", + "ProjectsResourceWithStreamingResponse", + "AsyncProjectsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py new file mode 100644 index 00000000..c5907765 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py @@ -0,0 +1,375 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
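+
+# A brief usage sketch for the project API key resources defined below;
+# illustrative only, assuming the client class is `DigitaloceanGenaiSDK` and
+# using placeholder project and key IDs.
+#
+#     client = DigitaloceanGenaiSDK(api_key="<admin-api-key>")
+#     keys = client.organization.projects.api_keys.list("proj_123", limit=20)
+#     key = client.organization.projects.api_keys.retrieve("key_abc", project_id="proj_123")
+#     client.organization.projects.api_keys.delete("key_abc", project_id="proj_123")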
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.organization.projects import api_key_list_params +from ....types.organization.projects.api_key import APIKey +from ....types.organization.projects.api_key_list_response import APIKeyListResponse +from ....types.organization.projects.api_key_delete_response import APIKeyDeleteResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def retrieve( + self, + key_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKey: + """ + Retrieves an API key in the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return self._get( + f"/organization/projects/{project_id}/api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKey, + ) + + def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + Returns a list of API keys in the project. 
+ + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/organization/projects/{project_id}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + def delete( + self, + key_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + Deletes an API key from the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return self._delete( + f"/organization/projects/{project_id}/api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def retrieve( + self, + key_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKey: + """ + Retrieves an API key in the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return await self._get( + f"/organization/projects/{project_id}/api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKey, + ) + + async def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + Returns a list of API keys in the project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/organization/projects/{project_id}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + async def delete( + self, + key_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + Deletes an API key from the project. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not key_id: + raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") + return await self._delete( + f"/organization/projects/{project_id}/api_keys/{key_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.retrieve = to_raw_response_wrapper( + api_keys.retrieve, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.retrieve = async_to_raw_response_wrapper( + api_keys.retrieve, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.retrieve = to_streamed_response_wrapper( + api_keys.retrieve, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.retrieve = async_to_streamed_response_wrapper( + api_keys.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py new file mode 100644 index 00000000..93e42de8 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py @@ -0,0 +1,670 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
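+
+# A brief usage sketch for the project resources defined below; illustrative
+# only, assuming the client class is `DigitaloceanGenaiSDK` and that the
+# `Project` model exposes an `id` field. Note that projects can be archived
+# but never deleted.
+#
+#     client = DigitaloceanGenaiSDK(api_key="<admin-api-key>")
+#     project = client.organization.projects.create(name="My Project")
+#     project = client.organization.projects.update(project.id, name="Renamed Project")
+#     client.organization.projects.archive(project.id)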
+ +from __future__ import annotations + +import httpx + +from .users import ( + UsersResource, + AsyncUsersResource, + UsersResourceWithRawResponse, + AsyncUsersResourceWithRawResponse, + UsersResourceWithStreamingResponse, + AsyncUsersResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from .rate_limits import ( + RateLimitsResource, + AsyncRateLimitsResource, + RateLimitsResourceWithRawResponse, + AsyncRateLimitsResourceWithRawResponse, + RateLimitsResourceWithStreamingResponse, + AsyncRateLimitsResourceWithStreamingResponse, +) +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from .service_accounts import ( + ServiceAccountsResource, + AsyncServiceAccountsResource, + ServiceAccountsResourceWithRawResponse, + AsyncServiceAccountsResourceWithRawResponse, + ServiceAccountsResourceWithStreamingResponse, + AsyncServiceAccountsResourceWithStreamingResponse, +) +from ....types.organization import project_list_params, project_create_params, project_update_params +from ....types.organization.project import Project +from ....types.organization.project_list_response import ProjectListResponse + +__all__ = ["ProjectsResource", "AsyncProjectsResource"] + + +class ProjectsResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + + @cached_property + def rate_limits(self) -> RateLimitsResource: + return RateLimitsResource(self._client) + + @cached_property + def service_accounts(self) -> ServiceAccountsResource: + return ServiceAccountsResource(self._client) + + @cached_property + def users(self) -> UsersResource: + return UsersResource(self._client) + + @cached_property + def with_raw_response(self) -> ProjectsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ProjectsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ProjectsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ProjectsResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """Create a new project in the organization. 
+ + Projects can be created and archived, + but cannot be deleted. + + Args: + name: The friendly name of the project; this name appears in reports. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/organization/projects", + body=maybe_transform({"name": name}, project_create_params.ProjectCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + def retrieve( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """ + Retrieves a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/organization/projects/{project_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + def update( + self, + project_id: str, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """ + Modifies a project in the organization. + + Args: + name: The updated name of the project; this name appears in reports. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/organization/projects/{project_id}", + body=maybe_transform({"name": name}, project_update_params.ProjectUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + include_archived: bool | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectListResponse: + """Returns a list of projects. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + include_archived: If `true`, returns all projects, including those that have been `archived`. + Archived projects are not included by default. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/projects", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "include_archived": include_archived, + "limit": limit, + }, + project_list_params.ProjectListParams, + ), + ), + cast_to=ProjectListResponse, + ) + + def archive( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """Archives a project in the organization. + + Archived projects cannot be used or + updated. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/organization/projects/{project_id}/archive", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + +class AsyncProjectsResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + + @cached_property + def rate_limits(self) -> AsyncRateLimitsResource: + return AsyncRateLimitsResource(self._client) + + @cached_property + def service_accounts(self) -> AsyncServiceAccountsResource: + return AsyncServiceAccountsResource(self._client) + + @cached_property + def users(self) -> AsyncUsersResource: + return AsyncUsersResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncProjectsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncProjectsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncProjectsResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """Create a new project in the organization. + + Projects can be created and archived, + but cannot be deleted. + + Args: + name: The friendly name of the project; this name appears in reports. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/organization/projects", + body=await async_maybe_transform({"name": name}, project_create_params.ProjectCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + async def retrieve( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """ + Retrieves a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/organization/projects/{project_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + async def update( + self, + project_id: str, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """ + Modifies a project in the organization. + + Args: + name: The updated name of the project; this name appears in reports.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/organization/projects/{project_id}", + body=await async_maybe_transform({"name": name}, project_update_params.ProjectUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + include_archived: bool | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectListResponse: + """Returns a list of projects. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + include_archived: If `true`, returns all projects, including those that have been `archived`. + Archived projects are not included by default. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/projects", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "include_archived": include_archived, + "limit": limit, + }, + project_list_params.ProjectListParams, + ), + ), + cast_to=ProjectListResponse, + ) + + async def archive( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Project: + """Archives a project in the organization. + + Archived projects cannot be used or + updated.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/organization/projects/{project_id}/archive", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Project, + ) + + +class ProjectsResourceWithRawResponse: + def __init__(self, projects: ProjectsResource) -> None: + self._projects = projects + + self.create = to_raw_response_wrapper( + projects.create, + ) + self.retrieve = to_raw_response_wrapper( + projects.retrieve, + ) + self.update = to_raw_response_wrapper( + projects.update, + ) + self.list = to_raw_response_wrapper( + projects.list, + ) + self.archive = to_raw_response_wrapper( + projects.archive, + ) + + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._projects.api_keys) + + @cached_property + def rate_limits(self) -> RateLimitsResourceWithRawResponse: + return RateLimitsResourceWithRawResponse(self._projects.rate_limits) + + @cached_property + def service_accounts(self) -> ServiceAccountsResourceWithRawResponse: + return ServiceAccountsResourceWithRawResponse(self._projects.service_accounts) + + @cached_property + def users(self) -> UsersResourceWithRawResponse: + return UsersResourceWithRawResponse(self._projects.users) + + +class AsyncProjectsResourceWithRawResponse: + def __init__(self, projects: AsyncProjectsResource) -> None: + self._projects = projects + + self.create = async_to_raw_response_wrapper( + projects.create, + ) + self.retrieve = async_to_raw_response_wrapper( + projects.retrieve, + ) + self.update = async_to_raw_response_wrapper( + projects.update, + ) + self.list = async_to_raw_response_wrapper( + projects.list, + ) + self.archive = async_to_raw_response_wrapper( + projects.archive, + ) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._projects.api_keys) + + @cached_property + def rate_limits(self) -> AsyncRateLimitsResourceWithRawResponse: + return AsyncRateLimitsResourceWithRawResponse(self._projects.rate_limits) + + @cached_property + def service_accounts(self) -> AsyncServiceAccountsResourceWithRawResponse: + return AsyncServiceAccountsResourceWithRawResponse(self._projects.service_accounts) + + @cached_property + def users(self) -> AsyncUsersResourceWithRawResponse: + return AsyncUsersResourceWithRawResponse(self._projects.users) + + +class ProjectsResourceWithStreamingResponse: + def __init__(self, projects: ProjectsResource) -> None: + self._projects = projects + + self.create = to_streamed_response_wrapper( + projects.create, + ) + self.retrieve = to_streamed_response_wrapper( + projects.retrieve, + ) + self.update = to_streamed_response_wrapper( + projects.update, + ) + self.list = to_streamed_response_wrapper( + projects.list, + ) + self.archive = to_streamed_response_wrapper( + projects.archive, + ) + + @cached_property + def api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._projects.api_keys) + + @cached_property + def rate_limits(self) -> RateLimitsResourceWithStreamingResponse: + return 
RateLimitsResourceWithStreamingResponse(self._projects.rate_limits) + + @cached_property + def service_accounts(self) -> ServiceAccountsResourceWithStreamingResponse: + return ServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts) + + @cached_property + def users(self) -> UsersResourceWithStreamingResponse: + return UsersResourceWithStreamingResponse(self._projects.users) + + +class AsyncProjectsResourceWithStreamingResponse: + def __init__(self, projects: AsyncProjectsResource) -> None: + self._projects = projects + + self.create = async_to_streamed_response_wrapper( + projects.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + projects.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + projects.update, + ) + self.list = async_to_streamed_response_wrapper( + projects.list, + ) + self.archive = async_to_streamed_response_wrapper( + projects.archive, + ) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._projects.api_keys) + + @cached_property + def rate_limits(self) -> AsyncRateLimitsResourceWithStreamingResponse: + return AsyncRateLimitsResourceWithStreamingResponse(self._projects.rate_limits) + + @cached_property + def service_accounts(self) -> AsyncServiceAccountsResourceWithStreamingResponse: + return AsyncServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts) + + @cached_property + def users(self) -> AsyncUsersResourceWithStreamingResponse: + return AsyncUsersResourceWithStreamingResponse(self._projects.users) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py new file mode 100644 index 00000000..9c9dce7b --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py @@ -0,0 +1,360 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.organization.projects import rate_limit_list_params, rate_limit_update_params +from ....types.organization.projects.rate_limit import RateLimit +from ....types.organization.projects.rate_limit_list_response import RateLimitListResponse + +__all__ = ["RateLimitsResource", "AsyncRateLimitsResource"] + + +class RateLimitsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RateLimitsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return RateLimitsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RateLimitsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return RateLimitsResourceWithStreamingResponse(self) + + def update( + self, + rate_limit_id: str, + *, + project_id: str, + batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN, + max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN, + max_images_per_1_minute: int | NotGiven = NOT_GIVEN, + max_requests_per_1_day: int | NotGiven = NOT_GIVEN, + max_requests_per_1_minute: int | NotGiven = NOT_GIVEN, + max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RateLimit: + """ + Updates a project rate limit. + + Args: + batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models. + + max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models. + + max_images_per_1_minute: The maximum images per minute. Only relevant for certain models. + + max_requests_per_1_day: The maximum requests per day. Only relevant for certain models. + + max_requests_per_1_minute: The maximum requests per minute. + + max_tokens_per_1_minute: The maximum tokens per minute. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not rate_limit_id: + raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}") + return self._post( + f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}", + body=maybe_transform( + { + "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens, + "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute, + "max_images_per_1_minute": max_images_per_1_minute, + "max_requests_per_1_day": max_requests_per_1_day, + "max_requests_per_1_minute": max_requests_per_1_minute, + "max_tokens_per_1_minute": max_tokens_per_1_minute, + }, + rate_limit_update_params.RateLimitUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RateLimit, + ) + + def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RateLimitListResponse: + """ + Returns the rate limits per model for a project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + beginning with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. The default is 100. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/organization/projects/{project_id}/rate_limits", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + }, + rate_limit_list_params.RateLimitListParams, + ), + ), + cast_to=RateLimitListResponse, + ) + + +class AsyncRateLimitsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRateLimitsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncRateLimitsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRateLimitsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncRateLimitsResourceWithStreamingResponse(self) + + async def update( + self, + rate_limit_id: str, + *, + project_id: str, + batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN, + max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN, + max_images_per_1_minute: int | NotGiven = NOT_GIVEN, + max_requests_per_1_day: int | NotGiven = NOT_GIVEN, + max_requests_per_1_minute: int | NotGiven = NOT_GIVEN, + max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RateLimit: + """ + Updates a project rate limit. + + Args: + batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models. + + max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models. + + max_images_per_1_minute: The maximum images per minute. Only relevant for certain models. + + max_requests_per_1_day: The maximum requests per day. Only relevant for certain models. + + max_requests_per_1_minute: The maximum requests per minute. 
+ + max_tokens_per_1_minute: The maximum tokens per minute. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not rate_limit_id: + raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}") + return await self._post( + f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}", + body=await async_maybe_transform( + { + "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens, + "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute, + "max_images_per_1_minute": max_images_per_1_minute, + "max_requests_per_1_day": max_requests_per_1_day, + "max_requests_per_1_minute": max_requests_per_1_minute, + "max_tokens_per_1_minute": max_tokens_per_1_minute, + }, + rate_limit_update_params.RateLimitUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RateLimit, + ) + + async def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RateLimitListResponse: + """ + Returns the rate limits per model for a project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + beginning with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. The default is 100. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/organization/projects/{project_id}/rate_limits", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + }, + rate_limit_list_params.RateLimitListParams, + ), + ), + cast_to=RateLimitListResponse, + ) + + +class RateLimitsResourceWithRawResponse: + def __init__(self, rate_limits: RateLimitsResource) -> None: + self._rate_limits = rate_limits + + self.update = to_raw_response_wrapper( + rate_limits.update, + ) + self.list = to_raw_response_wrapper( + rate_limits.list, + ) + + +class AsyncRateLimitsResourceWithRawResponse: + def __init__(self, rate_limits: AsyncRateLimitsResource) -> None: + self._rate_limits = rate_limits + + self.update = async_to_raw_response_wrapper( + rate_limits.update, + ) + self.list = async_to_raw_response_wrapper( + rate_limits.list, + ) + + +class RateLimitsResourceWithStreamingResponse: + def __init__(self, rate_limits: RateLimitsResource) -> None: + self._rate_limits = rate_limits + + self.update = to_streamed_response_wrapper( + rate_limits.update, + ) + self.list = to_streamed_response_wrapper( + rate_limits.list, + ) + + +class AsyncRateLimitsResourceWithStreamingResponse: + def __init__(self, rate_limits: AsyncRateLimitsResource) -> None: + self._rate_limits = rate_limits + + self.update = async_to_streamed_response_wrapper( + rate_limits.update, + ) + self.list = async_to_streamed_response_wrapper( + rate_limits.list, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py new file mode 100644 index 00000000..8957a81d --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py @@ -0,0 +1,466 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
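+#
+# Usage sketch (editorial illustration, not generator output): creating a
+# service account and capturing its one-time unredacted API key. The client
+# class name `DigitaloceanGenaiSDK` and the `api_key` field on the create
+# response are assumptions for illustration; "proj_abc" is a placeholder ID.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()  # credentials resolved per the client's own config
+#     created = client.organization.projects.service_accounts.create(
+#         project_id="proj_abc",
+#         name="ci-runner",
+#     )
+#     # The unredacted key is returned only by this create call; store it now.
+#     print(created.api_key)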
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.organization.projects import service_account_list_params, service_account_create_params +from ....types.organization.projects.service_account import ServiceAccount +from ....types.organization.projects.service_account_list_response import ServiceAccountListResponse +from ....types.organization.projects.service_account_create_response import ServiceAccountCreateResponse +from ....types.organization.projects.service_account_delete_response import ServiceAccountDeleteResponse + +__all__ = ["ServiceAccountsResource", "AsyncServiceAccountsResource"] + + +class ServiceAccountsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ServiceAccountsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ServiceAccountsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ServiceAccountsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ServiceAccountsResourceWithStreamingResponse(self) + + def create( + self, + project_id: str, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountCreateResponse: + """Creates a new service account in the project. + + This also returns an unredacted + API key for the service account. + + Args: + name: The name of the service account being created. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/organization/projects/{project_id}/service_accounts", + body=maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccountCreateResponse, + ) + + def retrieve( + self, + service_account_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccount: + """ + Retrieves a service account in the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not service_account_id: + raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") + return self._get( + f"/organization/projects/{project_id}/service_accounts/{service_account_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccount, + ) + + def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountListResponse: + """ + Returns a list of service accounts in the project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/organization/projects/{project_id}/service_accounts", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + service_account_list_params.ServiceAccountListParams, + ), + ), + cast_to=ServiceAccountListResponse, + ) + + def delete( + self, + service_account_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountDeleteResponse: + """ + Deletes a service account from the project. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not service_account_id: + raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") + return self._delete( + f"/organization/projects/{project_id}/service_accounts/{service_account_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccountDeleteResponse, + ) + + +class AsyncServiceAccountsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncServiceAccountsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncServiceAccountsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncServiceAccountsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncServiceAccountsResourceWithStreamingResponse(self) + + async def create( + self, + project_id: str, + *, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountCreateResponse: + """Creates a new service account in the project. + + This also returns an unredacted + API key for the service account. + + Args: + name: The name of the service account being created. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/organization/projects/{project_id}/service_accounts", + body=await async_maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccountCreateResponse, + ) + + async def retrieve( + self, + service_account_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccount: + """ + Retrieves a service account in the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not service_account_id: + raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") + return await self._get( + f"/organization/projects/{project_id}/service_accounts/{service_account_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccount, + ) + + async def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountListResponse: + """ + Returns a list of service accounts in the project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/organization/projects/{project_id}/service_accounts", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + service_account_list_params.ServiceAccountListParams, + ), + ), + cast_to=ServiceAccountListResponse, + ) + + async def delete( + self, + service_account_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ServiceAccountDeleteResponse: + """ + Deletes a service account from the project. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not service_account_id: + raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") + return await self._delete( + f"/organization/projects/{project_id}/service_accounts/{service_account_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ServiceAccountDeleteResponse, + ) + + +class ServiceAccountsResourceWithRawResponse: + def __init__(self, service_accounts: ServiceAccountsResource) -> None: + self._service_accounts = service_accounts + + self.create = to_raw_response_wrapper( + service_accounts.create, + ) + self.retrieve = to_raw_response_wrapper( + service_accounts.retrieve, + ) + self.list = to_raw_response_wrapper( + service_accounts.list, + ) + self.delete = to_raw_response_wrapper( + service_accounts.delete, + ) + + +class AsyncServiceAccountsResourceWithRawResponse: + def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None: + self._service_accounts = service_accounts + + self.create = async_to_raw_response_wrapper( + service_accounts.create, + ) + self.retrieve = async_to_raw_response_wrapper( + service_accounts.retrieve, + ) + self.list = async_to_raw_response_wrapper( + service_accounts.list, + ) + self.delete = async_to_raw_response_wrapper( + service_accounts.delete, + ) + + +class ServiceAccountsResourceWithStreamingResponse: + def __init__(self, service_accounts: ServiceAccountsResource) -> None: + self._service_accounts = service_accounts + + self.create = to_streamed_response_wrapper( + service_accounts.create, + ) + self.retrieve = to_streamed_response_wrapper( + service_accounts.retrieve, + ) + self.list = to_streamed_response_wrapper( + service_accounts.list, + ) + self.delete = to_streamed_response_wrapper( + service_accounts.delete, + ) + + +class AsyncServiceAccountsResourceWithStreamingResponse: + def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None: + self._service_accounts = service_accounts + + self.create = async_to_streamed_response_wrapper( + service_accounts.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + service_accounts.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + service_accounts.list, + ) + self.delete = async_to_streamed_response_wrapper( + service_accounts.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/users.py b/src/digitalocean_genai_sdk/resources/organization/projects/users.py new file mode 100644 index 00000000..e35ff0cf --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/projects/users.py @@ -0,0 +1,577 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
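+#
+# Usage sketch (editorial illustration, not generator output): adding an
+# existing organization member to a project, then paging through the
+# project's users with the `after` cursor described in the docstrings below.
+# The client class name and the `data`/`has_more`/`last_id` fields on the
+# list response are assumptions, not confirmed by this spec.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()
+#     client.organization.projects.users.add("proj_abc", role="member", user_id="user_123")
+#
+#     cursor = None
+#     while True:
+#         kwargs = {"after": cursor} if cursor is not None else {}
+#         page = client.organization.projects.users.list("proj_abc", limit=20, **kwargs)
+#         for user in page.data:
+#             print(user.id, user.role)
+#         if not page.has_more:
+#             break
+#         cursor = page.last_id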
+ +from __future__ import annotations + +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.organization.projects import user_add_params, user_list_params, user_update_params +from ....types.organization.projects.project_user import ProjectUser +from ....types.organization.projects.user_list_response import UserListResponse +from ....types.organization.projects.user_delete_response import UserDeleteResponse + +__all__ = ["UsersResource", "AsyncUsersResource"] + + +class UsersResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> UsersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return UsersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UsersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return UsersResourceWithStreamingResponse(self) + + def retrieve( + self, + user_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """ + Retrieves a user in the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._get( + f"/organization/projects/{project_id}/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + def update( + self, + user_id: str, + *, + project_id: str, + role: Literal["owner", "member"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """ + Modifies a user's role in the project. + + Args: + role: `owner` or `member` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._post( + f"/organization/projects/{project_id}/users/{user_id}", + body=maybe_transform({"role": role}, user_update_params.UserUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserListResponse: + """ + Returns a list of users in the project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/organization/projects/{project_id}/users", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "limit": limit, + }, + user_list_params.UserListParams, + ), + ), + cast_to=UserListResponse, + ) + + def delete( + self, + user_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserDeleteResponse: + """ + Deletes a user from the project. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._delete( + f"/organization/projects/{project_id}/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UserDeleteResponse, + ) + + def add( + self, + project_id: str, + *, + role: Literal["owner", "member"], + user_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """Adds a user to the project. + + Users must already be members of the organization to + be added to a project. + + Args: + role: `owner` or `member` + + user_id: The ID of the user. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/organization/projects/{project_id}/users", + body=maybe_transform( + { + "role": role, + "user_id": user_id, + }, + user_add_params.UserAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + +class AsyncUsersResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncUsersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncUsersResourceWithStreamingResponse(self) + + async def retrieve( + self, + user_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """ + Retrieves a user in the project. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._get( + f"/organization/projects/{project_id}/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + async def update( + self, + user_id: str, + *, + project_id: str, + role: Literal["owner", "member"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """ + Modifies a user's role in the project. + + Args: + role: `owner` or `member` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._post( + f"/organization/projects/{project_id}/users/{user_id}", + body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + async def list( + self, + project_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserListResponse: + """ + Returns a list of users in the project. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/organization/projects/{project_id}/users", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "limit": limit, + }, + user_list_params.UserListParams, + ), + ), + cast_to=UserListResponse, + ) + + async def delete( + self, + user_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserDeleteResponse: + """ + Deletes a user from the project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._delete( + f"/organization/projects/{project_id}/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UserDeleteResponse, + ) + + async def add( + self, + project_id: str, + *, + role: Literal["owner", "member"], + user_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectUser: + """Adds a user to the project. + + Users must already be members of the organization to + be added to a project. + + Args: + role: `owner` or `member` + + user_id: The ID of the user. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/organization/projects/{project_id}/users", + body=await async_maybe_transform( + { + "role": role, + "user_id": user_id, + }, + user_add_params.UserAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectUser, + ) + + +class UsersResourceWithRawResponse: + def __init__(self, users: UsersResource) -> None: + self._users = users + + self.retrieve = to_raw_response_wrapper( + users.retrieve, + ) + self.update = to_raw_response_wrapper( + users.update, + ) + self.list = to_raw_response_wrapper( + users.list, + ) + self.delete = to_raw_response_wrapper( + users.delete, + ) + self.add = to_raw_response_wrapper( + users.add, + ) + + +class AsyncUsersResourceWithRawResponse: + def __init__(self, users: AsyncUsersResource) -> None: + self._users = users + + self.retrieve = async_to_raw_response_wrapper( + users.retrieve, + ) + self.update = async_to_raw_response_wrapper( + users.update, + ) + self.list = async_to_raw_response_wrapper( + users.list, + ) + self.delete = async_to_raw_response_wrapper( + users.delete, + ) + self.add = async_to_raw_response_wrapper( + users.add, + ) + + +class UsersResourceWithStreamingResponse: + def __init__(self, users: UsersResource) -> None: + self._users = users + + self.retrieve = to_streamed_response_wrapper( + users.retrieve, + ) + self.update = to_streamed_response_wrapper( + users.update, + ) + self.list = to_streamed_response_wrapper( + users.list, + ) + self.delete = to_streamed_response_wrapper( + users.delete, + ) + self.add = to_streamed_response_wrapper( + users.add, + ) + + +class AsyncUsersResourceWithStreamingResponse: + def __init__(self, users: AsyncUsersResource) -> None: + self._users = users + + self.retrieve = async_to_streamed_response_wrapper( + users.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + users.update, + ) + self.list = async_to_streamed_response_wrapper( + users.list, + ) + self.delete = async_to_streamed_response_wrapper( + users.delete, + ) + self.add = async_to_streamed_response_wrapper( + users.add, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/usage.py b/src/digitalocean_genai_sdk/resources/organization/usage.py new file mode 100644 index 00000000..37d11956 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/usage.py @@ -0,0 +1,1543 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
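+#
+# Usage sketch (editorial illustration, not generator output): pulling a week
+# of daily audio-speech usage grouped by model, following the `page` cursor
+# that the docstrings below describe. The client class name and the
+# `data`/`next_page` fields on `UsageResponse` are assumptions.
+#
+#     import time
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()
+#     week_ago = int(time.time()) - 7 * 24 * 3600  # start_time is Unix seconds, inclusive
+#     page = None
+#     while True:
+#         kwargs = {"page": page} if page is not None else {}
+#         resp = client.organization.usage.audio_speeches(
+#             start_time=week_ago, bucket_width="1d", group_by=["model"], **kwargs
+#         )
+#         for bucket in resp.data:
+#             print(bucket)
+#         page = getattr(resp, "next_page", None)
+#         if not page:
+#             break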
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.organization import ( + usage_images_params, + usage_embeddings_params, + usage_completions_params, + usage_moderations_params, + usage_vector_stores_params, + usage_audio_speeches_params, + usage_audio_transcriptions_params, + usage_code_interpreter_sessions_params, +) +from ...types.usage_response import UsageResponse + +__all__ = ["UsageResource", "AsyncUsageResource"] + + +class UsageResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> UsageResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return UsageResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UsageResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return UsageResourceWithStreamingResponse(self) + + def audio_speeches( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get audio speeches usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. 
Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/audio_speeches", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_audio_speeches_params.UsageAudioSpeechesParams, + ), + ), + cast_to=UsageResponse, + ) + + def audio_transcriptions( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get audio transcriptions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
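For readers skimming the generated methods above, here is a minimal sketch of how the synchronous usage endpoints are meant to be driven, including the `page` cursor the docstrings describe. The client class name `DigitaloceanGenaiSDK` and the assumption that `UsageResponse` exposes a `next_page` attribute are inferred, not confirmed by this patch; check `_client.py` and `types/usage_response.py` in the full diff.

import time

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()  # assumed to read credentials from the environment

# Last 7 days of audio-speech usage, one bucket per day, grouped by model.
params = dict(
    start_time=int(time.time()) - 7 * 86_400,
    bucket_width="1d",
    group_by=["model"],
    limit=7,
)
resp = client.organization.usage.audio_speeches(**params)
while getattr(resp, "next_page", None):  # walk the cursor until exhausted
    resp = client.organization.usage.audio_speeches(page=resp.next_page, **params)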
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/audio_transcriptions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_audio_transcriptions_params.UsageAudioTranscriptionsParams, + ), + ), + cast_to=UsageResponse, + ) + + def code_interpreter_sessions( + self, + *, + start_time: int, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get code interpreter sessions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/code_interpreter_sessions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams, + ), + ), + cast_to=UsageResponse, + ) + + def completions( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + batch: bool | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get completions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By + default, return both. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of + them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
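A short companion sketch for the completions endpoint above, showing the `batch` filter and the `batch` grouping dimension. It continues the earlier sketch, so `client` and the `time` import are assumed from there, and the project IDs are placeholders.

# Batch-only completions usage for selected projects, split by model and batch status.
usage = client.organization.usage.completions(
    start_time=int(time.time()) - 30 * 86_400,
    bucket_width="1d",
    batch=True,  # True: batch jobs only; False: non-batch only; omit for both
    group_by=["model", "batch"],
    project_ids=["proj_example_1", "proj_example_2"],  # placeholder IDs
    limit=31,
)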
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/completions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "batch": batch, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_completions_params.UsageCompletionsParams, + ), + ), + cast_to=UsageResponse, + ) + + def embeddings( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get embeddings usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/embeddings", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_embeddings_params.UsageEmbeddingsParams, + ), + ), + cast_to=UsageResponse, + ) + + def images( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] + | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN, + sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get images usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any + combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + sizes: Return only usages for these image sizes. Possible values are `256x256`, + `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them. + + sources: Return only usages for these sources. Possible values are `image.generation`, + `image.edit`, `image.variation` or any combination of them. + + user_ids: Return only usage for these users. 
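The images endpoint above is the only usage method with `sizes` and `sources` filters; a sketch of combining them with grouping, again reusing the assumed `client` from the first sketch.

# Image usage restricted to generation and edit calls at 1024x1024,
# grouped so each (size, source) pair gets its own series.
usage = client.organization.usage.images(
    start_time=int(time.time()) - 7 * 86_400,
    sources=["image.generation", "image.edit"],
    sizes=["1024x1024"],
    group_by=["size", "source"],
)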
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "sizes": sizes, + "sources": sources, + "user_ids": user_ids, + }, + usage_images_params.UsageImagesParams, + ), + ), + cast_to=UsageResponse, + ) + + def moderations( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get moderations usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/moderations", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_moderations_params.UsageModerationsParams, + ), + ), + cast_to=UsageResponse, + ) + + def vector_stores( + self, + *, + start_time: int, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get vector stores usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/usage/vector_stores", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + usage_vector_stores_params.UsageVectorStoresParams, + ), + ), + cast_to=UsageResponse, + ) + + +class AsyncUsageResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncUsageResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncUsageResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncUsageResourceWithStreamingResponse(self) + + async def audio_speeches( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get audio speeches usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/audio_speeches", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_audio_speeches_params.UsageAudioSpeechesParams, + ), + ), + cast_to=UsageResponse, + ) + + async def audio_transcriptions( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get audio transcriptions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
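The async variant above takes the same parameters as its sync counterpart and differs only in being awaited. A minimal sketch, assuming the async client is exported as `AsyncDigitaloceanGenaiSDK` (verify the exact name in `_client.py`):

import asyncio
import time

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # assumed export name

async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    # Hourly buckets for the past day; identical parameters to the sync variant.
    usage = await client.organization.usage.audio_speeches(
        start_time=int(time.time()) - 86_400,
        bucket_width="1h",
        limit=24,
    )
    print(usage)

asyncio.run(main())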
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/audio_transcriptions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_audio_transcriptions_params.UsageAudioTranscriptionsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def code_interpreter_sessions( + self, + *, + start_time: int, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get code interpreter sessions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/code_interpreter_sessions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def completions( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + batch: bool | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get completions usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By + default, return both. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of + them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/completions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "batch": batch, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_completions_params.UsageCompletionsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def embeddings( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get embeddings usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/embeddings", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_embeddings_params.UsageEmbeddingsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def images( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] + | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN, + sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get images usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any + combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + sizes: Return only usages for these image sizes. Possible values are `256x256`, + `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them. + + sources: Return only usages for these sources. Possible values are `image.generation`, + `image.edit`, `image.variation` or any combination of them. + + user_ids: Return only usage for these users. 
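Because every method on `AsyncUsageResource` returns a coroutine, independent usage queries can be issued concurrently rather than sequentially; a sketch with `asyncio.gather`, reusing the assumed async client from the previous sketch:

async def snapshot(client: AsyncDigitaloceanSDK, start: int):  # assumed class name
    # Three independent GETs issued in parallel; gather preserves argument order.
    return await asyncio.gather(
        client.organization.usage.audio_speeches(start_time=start),
        client.organization.usage.embeddings(start_time=start),
        client.organization.usage.moderations(start_time=start),
    )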
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "sizes": sizes, + "sources": sources, + "user_ids": user_ids, + }, + usage_images_params.UsageImagesParams, + ), + ), + cast_to=UsageResponse, + ) + + async def moderations( + self, + *, + start_time: int, + api_key_ids: List[str] | NotGiven = NOT_GIVEN, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + models: List[str] | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + user_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get moderations usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + api_key_ids: Return only usage for these API keys. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + models: Return only usage for these models. + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. + + user_ids: Return only usage for these users. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/moderations", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "api_key_ids": api_key_ids, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "models": models, + "page": page, + "project_ids": project_ids, + "user_ids": user_ids, + }, + usage_moderations_params.UsageModerationsParams, + ), + ), + cast_to=UsageResponse, + ) + + async def vector_stores( + self, + *, + start_time: int, + bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, + end_time: int | NotGiven = NOT_GIVEN, + group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + page: str | NotGiven = NOT_GIVEN, + project_ids: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UsageResponse: + """ + Get vector stores usage details for the organization. + + Args: + start_time: Start time (Unix seconds) of the query time range, inclusive. + + bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are + supported, default to `1d`. + + end_time: End time (Unix seconds) of the query time range, exclusive. + + group_by: Group the usage data by the specified fields. Support fields include + `project_id`. + + limit: Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + + page: A cursor for use in pagination. Corresponding to the `next_page` field from the + previous response. + + project_ids: Return only usage for these projects. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/usage/vector_stores", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start_time": start_time, + "bucket_width": bucket_width, + "end_time": end_time, + "group_by": group_by, + "limit": limit, + "page": page, + "project_ids": project_ids, + }, + usage_vector_stores_params.UsageVectorStoresParams, + ), + ), + cast_to=UsageResponse, + ) + + +class UsageResourceWithRawResponse: + def __init__(self, usage: UsageResource) -> None: + self._usage = usage + + self.audio_speeches = to_raw_response_wrapper( + usage.audio_speeches, + ) + self.audio_transcriptions = to_raw_response_wrapper( + usage.audio_transcriptions, + ) + self.code_interpreter_sessions = to_raw_response_wrapper( + usage.code_interpreter_sessions, + ) + self.completions = to_raw_response_wrapper( + usage.completions, + ) + self.embeddings = to_raw_response_wrapper( + usage.embeddings, + ) + self.images = to_raw_response_wrapper( + usage.images, + ) + self.moderations = to_raw_response_wrapper( + usage.moderations, + ) + self.vector_stores = to_raw_response_wrapper( + usage.vector_stores, + ) + + +class AsyncUsageResourceWithRawResponse: + def __init__(self, usage: AsyncUsageResource) -> None: + self._usage = usage + + self.audio_speeches = async_to_raw_response_wrapper( + usage.audio_speeches, + ) + self.audio_transcriptions = async_to_raw_response_wrapper( + usage.audio_transcriptions, + ) + self.code_interpreter_sessions = async_to_raw_response_wrapper( + usage.code_interpreter_sessions, + ) + self.completions = async_to_raw_response_wrapper( + usage.completions, + ) + self.embeddings = async_to_raw_response_wrapper( + usage.embeddings, + ) + self.images = async_to_raw_response_wrapper( + usage.images, + ) + self.moderations = async_to_raw_response_wrapper( + usage.moderations, + ) + self.vector_stores = async_to_raw_response_wrapper( + usage.vector_stores, + ) + + +class UsageResourceWithStreamingResponse: + def __init__(self, usage: UsageResource) -> None: + self._usage = usage + + self.audio_speeches = to_streamed_response_wrapper( + usage.audio_speeches, + ) + self.audio_transcriptions = to_streamed_response_wrapper( + usage.audio_transcriptions, + ) + self.code_interpreter_sessions = to_streamed_response_wrapper( + usage.code_interpreter_sessions, + ) + self.completions = to_streamed_response_wrapper( + usage.completions, + ) + self.embeddings = to_streamed_response_wrapper( + usage.embeddings, + ) + self.images = to_streamed_response_wrapper( + usage.images, + ) + self.moderations = to_streamed_response_wrapper( + usage.moderations, + ) + self.vector_stores = to_streamed_response_wrapper( + usage.vector_stores, + ) + + +class AsyncUsageResourceWithStreamingResponse: + def __init__(self, usage: AsyncUsageResource) -> None: + self._usage = usage + + self.audio_speeches = async_to_streamed_response_wrapper( + usage.audio_speeches, + ) + self.audio_transcriptions = async_to_streamed_response_wrapper( + usage.audio_transcriptions, + ) + self.code_interpreter_sessions = async_to_streamed_response_wrapper( + usage.code_interpreter_sessions, + ) + self.completions = async_to_streamed_response_wrapper( + 
usage.completions, + ) + self.embeddings = async_to_streamed_response_wrapper( + usage.embeddings, + ) + self.images = async_to_streamed_response_wrapper( + usage.images, + ) + self.moderations = async_to_streamed_response_wrapper( + usage.moderations, + ) + self.vector_stores = async_to_streamed_response_wrapper( + usage.vector_stores, + ) diff --git a/src/digitalocean_genai_sdk/resources/organization/users.py b/src/digitalocean_genai_sdk/resources/organization/users.py new file mode 100644 index 00000000..536e4396 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/organization/users.py @@ -0,0 +1,454 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.organization import user_list_params, user_update_params +from ...types.organization.organization_user import OrganizationUser +from ...types.organization.user_list_response import UserListResponse +from ...types.organization.user_delete_response import UserDeleteResponse + +__all__ = ["UsersResource", "AsyncUsersResource"] + + +class UsersResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> UsersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return UsersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UsersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return UsersResourceWithStreamingResponse(self) + + def retrieve( + self, + user_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationUser: + """ + Retrieves a user by their identifier. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._get( + f"/organization/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OrganizationUser, + ) + + def update( + self, + user_id: str, + *, + role: Literal["owner", "reader"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationUser: + """ + Modifies a user's role in the organization. + + Args: + role: `owner` or `reader` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._post( + f"/organization/users/{user_id}", + body=maybe_transform({"role": role}, user_update_params.UserUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OrganizationUser, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + emails: List[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserListResponse: + """ + Lists all of the users in the organization. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + emails: Filter by the email address of users. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
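The `after`/`limit` cursor described above pairs naturally with a fetch-all loop. Whether `UserListResponse` carries `data`, `has_more`, and per-object `id` fields is an assumption here (OpenAI-style list envelopes usually do); confirm against `types/organization/user_list_response.py`.

# Collect every user in the organization, 100 per request.
page = client.organization.users.list(limit=100)
users = list(page.data)  # assumed `data` field
while getattr(page, "has_more", False):  # assumed `has_more` flag
    page = client.organization.users.list(after=users[-1].id, limit=100)
    users.extend(page.data)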
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/organization/users", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "emails": emails, + "limit": limit, + }, + user_list_params.UserListParams, + ), + ), + cast_to=UserListResponse, + ) + + def delete( + self, + user_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserDeleteResponse: + """ + Deletes a user from the organization. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return self._delete( + f"/organization/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UserDeleteResponse, + ) + + +class AsyncUsersResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncUsersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncUsersResourceWithStreamingResponse(self) + + async def retrieve( + self, + user_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationUser: + """ + Retrieves a user by their identifier. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._get( + f"/organization/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OrganizationUser, + ) + + async def update( + self, + user_id: str, + *, + role: Literal["owner", "reader"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> OrganizationUser: + """ + Modifies a user's role in the organization. + + Args: + role: `owner` or `reader` + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._post( + f"/organization/users/{user_id}", + body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=OrganizationUser, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + emails: List[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserListResponse: + """ + Lists all of the users in the organization. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + emails: Filter by the email address of users. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/organization/users", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "emails": emails, + "limit": limit, + }, + user_list_params.UserListParams, + ), + ), + cast_to=UserListResponse, + ) + + async def delete( + self, + user_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UserDeleteResponse: + """ + Deletes a user from the organization. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not user_id: + raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") + return await self._delete( + f"/organization/users/{user_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UserDeleteResponse, + ) + + +class UsersResourceWithRawResponse: + def __init__(self, users: UsersResource) -> None: + self._users = users + + self.retrieve = to_raw_response_wrapper( + users.retrieve, + ) + self.update = to_raw_response_wrapper( + users.update, + ) + self.list = to_raw_response_wrapper( + users.list, + ) + self.delete = to_raw_response_wrapper( + users.delete, + ) + + +class AsyncUsersResourceWithRawResponse: + def __init__(self, users: AsyncUsersResource) -> None: + self._users = users + + self.retrieve = async_to_raw_response_wrapper( + users.retrieve, + ) + self.update = async_to_raw_response_wrapper( + users.update, + ) + self.list = async_to_raw_response_wrapper( + users.list, + ) + self.delete = async_to_raw_response_wrapper( + users.delete, + ) + + +class UsersResourceWithStreamingResponse: + def __init__(self, users: UsersResource) -> None: + self._users = users + + self.retrieve = to_streamed_response_wrapper( + users.retrieve, + ) + self.update = to_streamed_response_wrapper( + users.update, + ) + self.list = to_streamed_response_wrapper( + users.list, + ) + self.delete = to_streamed_response_wrapper( + users.delete, + ) + + +class AsyncUsersResourceWithStreamingResponse: + def __init__(self, users: AsyncUsersResource) -> None: + self._users = users + + self.retrieve = async_to_streamed_response_wrapper( + users.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + users.update, + ) + self.list = async_to_streamed_response_wrapper( + users.list, + ) + self.delete = async_to_streamed_response_wrapper( + users.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/realtime.py b/src/digitalocean_genai_sdk/resources/realtime.py new file mode 100644 index 00000000..4c70a798 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/realtime.py @@ -0,0 +1,574 @@ 
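+# Illustrative usage (not part of the generated code): a minimal sketch of
+# minting an ephemeral Realtime token with the resource defined below. The
+# client class name `DigitaloceanGenaiSDK` and its zero-argument construction
+# are assumptions; the parameters mirror the `create_session` signature in
+# this file.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()
+#     session = client.realtime.create_session(
+#         model="gpt-4o-realtime-preview",
+#         modalities=["text", "audio"],
+#         voice="alloy",
+#     )
+#     # The response carries a `client_secret`, the ephemeral token handed
+#     # to browser clients.
+#     print(session.client_secret)
+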
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal + +import httpx + +from ..types import realtime_create_session_params, realtime_create_transcription_session_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.voice_ids_shared_param import VoiceIDsSharedParam +from ..types.realtime_create_session_response import RealtimeCreateSessionResponse +from ..types.realtime_create_transcription_session_response import RealtimeCreateTranscriptionSessionResponse + +__all__ = ["RealtimeResource", "AsyncRealtimeResource"] + + +class RealtimeResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RealtimeResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return RealtimeResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RealtimeResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return RealtimeResourceWithStreamingResponse(self) + + def create_session( + self, + *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN, + input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + | NotGiven = NOT_GIVEN, + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + tool_choice: str | NotGiven = NOT_GIVEN, + tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN, + turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN, + voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RealtimeCreateSessionResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API. Can be configured with the same session parameters as the
+        `session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
+              `null` to turn off once on. Input audio transcription is not native to the
+              model, since the model consumes audio directly. Transcription runs
+              asynchronously through
+              [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+              and should be treated as guidance of input audio content rather than precisely
+              what the model heard. The client can optionally set the language and prompt for
+              transcription; these offer additional guidance to the transcription service.
+
+          instructions: The default system instructions (i.e. system message) prepended to model calls.
+              This field allows the client to guide the model on desired responses. The model
+              can be instructed on response content and format (e.g. "be extremely succinct",
+              "act friendly", "here are examples of good responses") and on audio behavior
+              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+              instructions are not guaranteed to be followed by the model, but they provide
+              guidance to the model on the desired behavior.
+
+              Note that the server sets default instructions which will be used if this field
+              is not set and are visible in the `session.created` event at the start of the
+              session.
+
+          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
+              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          model: The Realtime model used for this session.
+
+          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
+
+          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models, a
+              temperature of 0.8 is highly recommended for best performance.
+
+          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+              a function.
+
+          tools: Tools (functions) available to the model.
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          voice: The voice the model uses to respond. Voice cannot be changed during the session
+              once the model has responded with audio at least once. Current voice options are
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+              `shimmer`, and `verse`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/realtime/sessions",
+            body=maybe_transform(
+                {
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "instructions": instructions,
+                    "max_response_output_tokens": max_response_output_tokens,
+                    "modalities": modalities,
+                    "model": model,
+                    "output_audio_format": output_audio_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "turn_detection": turn_detection,
+                    "voice": voice,
+                },
+                realtime_create_session_params.RealtimeCreateSessionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RealtimeCreateSessionResponse,
+        )
+
+    def create_transcription_session(
+        self,
+        *,
+        include: List[str] | NotGiven = NOT_GIVEN,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
+        | NotGiven = NOT_GIVEN,
+        input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
+        | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RealtimeCreateTranscriptionSessionResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API specifically for realtime transcriptions. Can be configured with
+        the same session parameters as the `transcription_session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          include:
+              The set of items to include in the transcription. Currently available items are:
+
+              - `item.input_audio_transcription.logprobs`
+
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
+              language and prompt for transcription; these offer additional guidance to the
+              transcription service.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/realtime/transcription_sessions",
+            body=maybe_transform(
+                {
+                    "include": include,
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "modalities": modalities,
+                    "turn_detection": turn_detection,
+                },
+                realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RealtimeCreateTranscriptionSessionResponse,
+        )
+
+
+class AsyncRealtimeResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncRealtimeResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncRealtimeResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRealtimeResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncRealtimeResourceWithStreamingResponse(self) + + async def create_session( + self, + *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN, + input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + | NotGiven = NOT_GIVEN, + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + tool_choice: str | NotGiven = NOT_GIVEN, + tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN, + turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN, + voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RealtimeCreateSessionResponse: + """ + Create an ephemeral API token for use in client-side applications with the + Realtime API. Can be configured with the same session parameters as the + `session.update` client event. + + It responds with a session object, plus a `client_secret` key which contains a + usable ephemeral API token that can be used to authenticate browser clients for + the Realtime API. + + Args: + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + (mono), and little-endian byte order. + + input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn + off. Noise reduction filters audio added to the input audio buffer before it is + sent to VAD and the model. Filtering the audio can improve VAD and turn + detection accuracy (reducing false positives) and model performance by improving + perception of the input audio. + + input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. 
Transcription runs
+              asynchronously through
+              [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+              and should be treated as guidance of input audio content rather than precisely
+              what the model heard. The client can optionally set the language and prompt for
+              transcription; these offer additional guidance to the transcription service.
+
+          instructions: The default system instructions (i.e. system message) prepended to model calls.
+              This field allows the client to guide the model on desired responses. The model
+              can be instructed on response content and format (e.g. "be extremely succinct",
+              "act friendly", "here are examples of good responses") and on audio behavior
+              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+              instructions are not guaranteed to be followed by the model, but they provide
+              guidance to the model on the desired behavior.
+
+              Note that the server sets default instructions which will be used if this field
+              is not set and are visible in the `session.created` event at the start of the
+              session.
+
+          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
+              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          model: The Realtime model used for this session.
+
+          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
+
+          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models, a
+              temperature of 0.8 is highly recommended for best performance.
+
+          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+              a function.
+
+          tools: Tools (functions) available to the model.
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          voice: The voice the model uses to respond. Voice cannot be changed during the session
+              once the model has responded with audio at least once. Current voice options are
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+              `shimmer`, and `verse`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/realtime/sessions",
+            body=await async_maybe_transform(
+                {
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "instructions": instructions,
+                    "max_response_output_tokens": max_response_output_tokens,
+                    "modalities": modalities,
+                    "model": model,
+                    "output_audio_format": output_audio_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "turn_detection": turn_detection,
+                    "voice": voice,
+                },
+                realtime_create_session_params.RealtimeCreateSessionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RealtimeCreateSessionResponse,
+        )
+
+    async def create_transcription_session(
+        self,
+        *,
+        include: List[str] | NotGiven = NOT_GIVEN,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction
+        | NotGiven = NOT_GIVEN,
+        input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription
+        | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RealtimeCreateTranscriptionSessionResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API specifically for realtime transcriptions. Can be configured with
+        the same session parameters as the `transcription_session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          include:
+              The set of items to include in the transcription. Currently available items are:
+
+              - `item.input_audio_transcription.logprobs`
+
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
+
+          input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn
+              off. Noise reduction filters audio added to the input audio buffer before it is
+              sent to VAD and the model. Filtering the audio can improve VAD and turn
+              detection accuracy (reducing false positives) and model performance by improving
+              perception of the input audio.
+
+          input_audio_transcription: Configuration for input audio transcription. The client can optionally set the
+              language and prompt for transcription; these offer additional guidance to the
+              transcription service.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+              set to `null` to turn off, in which case the client must manually trigger model
+              response. Server VAD means that the model will detect the start and end of
+              speech based on audio volume and respond at the end of user speech. Semantic VAD
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
+              semantically estimate whether the user has finished speaking, then dynamically
+              sets a timeout based on this probability. For example, if user audio trails off
+              with "uhhm", the model will score a low probability of turn end and wait longer
+              for the user to continue speaking. This can be useful for more natural
+              conversations, but may have a higher latency.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/realtime/transcription_sessions",
+            body=await async_maybe_transform(
+                {
+                    "include": include,
+                    "input_audio_format": input_audio_format,
+                    "input_audio_noise_reduction": input_audio_noise_reduction,
+                    "input_audio_transcription": input_audio_transcription,
+                    "modalities": modalities,
+                    "turn_detection": turn_detection,
+                },
+                realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RealtimeCreateTranscriptionSessionResponse,
+        )
+
+
+class RealtimeResourceWithRawResponse:
+    def __init__(self, realtime: RealtimeResource) -> None:
+        self._realtime = realtime
+
+        self.create_session = to_raw_response_wrapper(
+            realtime.create_session,
+        )
+        self.create_transcription_session = to_raw_response_wrapper(
+            realtime.create_transcription_session,
+        )
+
+
+class AsyncRealtimeResourceWithRawResponse:
+    def __init__(self, realtime: AsyncRealtimeResource) -> None:
+        self._realtime = realtime
+
+        self.create_session = async_to_raw_response_wrapper(
+            realtime.create_session,
+        )
+        self.create_transcription_session = async_to_raw_response_wrapper(
+            realtime.create_transcription_session,
+        )
+
+
+class RealtimeResourceWithStreamingResponse:
+    def __init__(self, realtime: RealtimeResource) -> None:
+        self._realtime = realtime
+
+        self.create_session = to_streamed_response_wrapper(
+            realtime.create_session,
+        )
+        self.create_transcription_session = to_streamed_response_wrapper(
+            realtime.create_transcription_session,
+        )
+
+
+class AsyncRealtimeResourceWithStreamingResponse:
+    def __init__(self, realtime: AsyncRealtimeResource) -> None:
+        self._realtime = realtime
+
+        self.create_session = async_to_streamed_response_wrapper(
+            realtime.create_session,
+        )
+        self.create_transcription_session = async_to_streamed_response_wrapper(
+            realtime.create_transcription_session,
+        )
diff --git a/src/digitalocean_genai_sdk/resources/responses.py b/src/digitalocean_genai_sdk/resources/responses.py
new file mode 100644
index 00000000..03445cdc
--- /dev/null
+++ b/src/digitalocean_genai_sdk/resources/responses.py
@@ -0,0 +1,902 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
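+#
+# Illustrative usage (not part of the generated code): a minimal sketch of the
+# lifecycle exposed by this resource: create a response, page through its input
+# items, then delete it. The client class name `DigitaloceanGenaiSDK` is an
+# assumption; method names and parameters mirror the signatures defined below.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK()
+#     response = client.responses.create(
+#         model="gpt-4o",
+#         input="Write a one-sentence summary of the Responses API.",
+#     )
+#     items = client.responses.list_input_items(response.id, limit=20, order="asc")
+#     client.responses.delete(response.id)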
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ..types import response_create_params, response_retrieve_params, response_list_input_items_params +from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.response import Response +from ..types.includable import Includable +from ..types.response_list_input_items_response import ResponseListInputItemsResponse + +__all__ = ["ResponsesResource", "AsyncResponsesResource"] + + +class ResponsesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ResponsesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ResponsesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ResponsesResourceWithStreamingResponse(self) + + def create( + self, + *, + input: Union[str, Iterable[response_create_params.InputInputItemList]], + model: Union[ + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "computer-use-preview", + "computer-use-preview-2025-03-11", + ], + str, + ], + include: Optional[List[Includable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = 
NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
+        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """Creates a model response.
+
+        Provide [text](/docs/guides/text) or
+        [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or
+        [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own
+        [custom code](/docs/guides/function-calling) or use built-in
+        [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or
+        [file search](/docs/guides/tools-file-search) to use your own data as input for
+        the model's response.
+
+        Args:
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](/docs/guides/text)
+              - [Image inputs](/docs/guides/images)
+              - [File inputs](/docs/guides/pdf-files)
+              - [Conversation state](/docs/guides/conversation-state)
+              - [Function calling](/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the [model guide](/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search
+                tool call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](/docs/guides/conversation-state).
+ + reasoning: **o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + + store: Whether to store the generated model response for later retrieval via API. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the [Streaming section below](/docs/api-reference/responses-streaming) for + more information. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](/docs/guides/text) + - [Structured Outputs](/docs/guides/structured-outputs) + + tool_choice: How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + + tools: An array of tools the model may call while generating a response. You can + specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like [web search](/docs/guides/tools-web-search) or + [file search](/docs/guides/tools-file-search). Learn more about + [built-in tools](/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](/docs/guides/function-calling). + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + truncation: The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/responses",
+            body=maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+        )
+
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[Includable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Specify additional output data to include in the response. Currently supported
+              values are:
+
+              - `file_search_call.results`: Include the search results of the file search
+                tool call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not response_id:
+            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+        return self._get(
+            f"/responses/{response_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams),
+            ),
+            cast_to=Response,
+        )
+
+    def delete(
+        self,
+        response_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Deletes a model response with the given ID.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def list_input_items( + self, + response_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResponseListInputItemsResponse: + """ + Returns a list of input items for a given response. + + Args: + after: An item ID to list items after, used in pagination. + + before: An item ID to list items before, used in pagination. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `asc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return self._get( + f"/responses/{response_id}/input_items", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + response_list_input_items_params.ResponseListInputItemsParams, + ), + ), + cast_to=ResponseListInputItemsResponse, + ) + + +class AsyncResponsesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncResponsesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncResponsesResourceWithStreamingResponse(self) + + async def create( + self, + *, + input: Union[str, Iterable[response_create_params.InputInputItemList]], + model: Union[ + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "computer-use-preview", + "computer-use-preview-2025-03-11", + ], + str, + ], + include: Optional[List[Includable]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, + previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, + tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Response: + """Creates a model response. + + Provide [text](/docs/guides/text) or + [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or + [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own + [custom code](/docs/guides/function-calling) or use built-in + [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or + [file search](/docs/guides/tools-file-search) to use your own data as input for + the model's response. + + Args: + input: Text, image, or file inputs to the model, used to generate a response. 
+
+              Learn more:
+
+              - [Text inputs and outputs](/docs/guides/text)
+              - [Image inputs](/docs/guides/images)
+              - [File inputs](/docs/guides/pdf-files)
+              - [Conversation state](/docs/guides/conversation-state)
+              - [Function calling](/docs/guides/function-calling)
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the [model guide](/docs/models) to browse and compare
+              available models.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `file_search_call.results`: Include the search results of the file search
+                tool call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+
+          instructions: Inserts a system (or developer) message as the first item in the model's
+              context.
+
+              When used along with `previous_response_id`, the instructions from a previous
+              response will not be carried over to the next response. This makes it simple
+              to swap out system (or developer) messages in new responses.
+
+          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
+              including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
+
+          previous_response_id: The unique ID of the previous response to the model. Use this to create
+              multi-turn conversations. Learn more about
+              [conversation state](/docs/guides/conversation-state).
+
+          reasoning: **o-series models only**
+
+              Configuration options for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+
+          store: Whether to store the generated model response for later retrieval via API.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the [Streaming section below](/docs/api-reference/responses-streaming) for
+              more information.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          text: Configuration options for a text response from the model. Can be plain text or
+              structured JSON data. Learn more:
+
+              - [Text inputs and outputs](/docs/guides/text)
+              - [Structured Outputs](/docs/guides/structured-outputs)
+
+          tool_choice: How the model should select which tool (or tools) to use when generating a
+              response. See the `tools` parameter to see how to specify which tools the model
+              can call.
+
+          tools: An array of tools the model may call while generating a response. You can
+              specify which tool to use by setting the `tool_choice` parameter.
+
+              The two categories of tools you can provide the model are:
+
+              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                capabilities, like [web search](/docs/guides/tools-web-search) or
+                [file search](/docs/guides/tools-file-search). Learn more about
+                [built-in tools](/docs/guides/tools).
+              - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                the model to call your own code. Learn more about
+                [function calling](/docs/guides/function-calling).
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation: The truncation strategy to use for the model response.
+
+              - `auto`: If the context of this response and previous ones exceeds the model's
+                context window size, the model will truncate the response to fit the context
+                window by dropping input items in the middle of the conversation.
+              - `disabled` (default): If a model response will exceed the context window size
+                for a model, the request will fail with a 400 error.
+
+          user: A unique identifier representing your end-user, which can help OpenAI to monitor
+              and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/responses",
+            body=await async_maybe_transform(
+                {
+                    "input": input,
+                    "model": model,
+                    "include": include,
+                    "instructions": instructions,
+                    "max_output_tokens": max_output_tokens,
+                    "metadata": metadata,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "previous_response_id": previous_response_id,
+                    "reasoning": reasoning,
+                    "store": store,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "text": text,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation": truncation,
+                    "user": user,
+                },
+                response_create_params.ResponseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Response,
+        )
+
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[Includable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          include: Specify additional output data to include in the response. Currently supported
+              values are:
+
+              - `file_search_call.results`: Include the search results of the file search
+                tool call.
+              - `message.input_image.image_url`: Include image URLs from the input message.
+              - `computer_call_output.output.image_url`: Include image URLs from the computer
+                call output.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return await self._get( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"include": include}, response_retrieve_params.ResponseRetrieveParams + ), + ), + cast_to=Response, + ) + + async def delete( + self, + response_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Deletes a model response with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/responses/{response_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def list_input_items( + self, + response_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResponseListInputItemsResponse: + """ + Returns a list of input items for a given response. + + Args: + after: An item ID to list items after, used in pagination. + + before: An item ID to list items before, used in pagination. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `asc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not response_id: + raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") + return await self._get( + f"/responses/{response_id}/input_items", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + response_list_input_items_params.ResponseListInputItemsParams, + ), + ), + cast_to=ResponseListInputItemsResponse, + ) + + +class ResponsesResourceWithRawResponse: + def __init__(self, responses: ResponsesResource) -> None: + self._responses = responses + + self.create = to_raw_response_wrapper( + responses.create, + ) + self.retrieve = to_raw_response_wrapper( + responses.retrieve, + ) + self.delete = to_raw_response_wrapper( + responses.delete, + ) + self.list_input_items = to_raw_response_wrapper( + responses.list_input_items, + ) + + +class AsyncResponsesResourceWithRawResponse: + def __init__(self, responses: AsyncResponsesResource) -> None: + self._responses = responses + + self.create = async_to_raw_response_wrapper( + responses.create, + ) + self.retrieve = async_to_raw_response_wrapper( + responses.retrieve, + ) + self.delete = async_to_raw_response_wrapper( + responses.delete, + ) + self.list_input_items = async_to_raw_response_wrapper( + responses.list_input_items, + ) + + +class ResponsesResourceWithStreamingResponse: + def __init__(self, responses: ResponsesResource) -> None: + self._responses = responses + + self.create = to_streamed_response_wrapper( + responses.create, + ) + self.retrieve = to_streamed_response_wrapper( + responses.retrieve, + ) + self.delete = to_streamed_response_wrapper( + responses.delete, + ) + self.list_input_items = to_streamed_response_wrapper( + responses.list_input_items, + ) + + +class AsyncResponsesResourceWithStreamingResponse: + def __init__(self, responses: AsyncResponsesResource) -> None: + self._responses = responses + + self.create = async_to_streamed_response_wrapper( + responses.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + responses.retrieve, + ) + self.delete = async_to_streamed_response_wrapper( + responses.delete, + ) + self.list_input_items = async_to_streamed_response_wrapper( + responses.list_input_items, + ) diff --git a/src/digitalocean_genai_sdk/resources/threads/__init__.py b/src/digitalocean_genai_sdk/resources/threads/__init__.py new file mode 100644 index 00000000..736b9bd6 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/threads/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
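+#
+# A minimal usage sketch (not part of the generated module; the attribute paths
+# and IDs below are assumptions based on this package's resource layout, and a
+# configured `client` is assumed):
+#
+#     messages = client.threads.messages.list("thread_abc123")
+#     run = client.threads.runs.retrieve("run_abc123", thread_id="thread_abc123")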
+ +from .runs import ( + RunsResource, + AsyncRunsResource, + RunsResourceWithRawResponse, + AsyncRunsResourceWithRawResponse, + RunsResourceWithStreamingResponse, + AsyncRunsResourceWithStreamingResponse, +) +from .threads import ( + ThreadsResource, + AsyncThreadsResource, + ThreadsResourceWithRawResponse, + AsyncThreadsResourceWithRawResponse, + ThreadsResourceWithStreamingResponse, + AsyncThreadsResourceWithStreamingResponse, +) +from .messages import ( + MessagesResource, + AsyncMessagesResource, + MessagesResourceWithRawResponse, + AsyncMessagesResourceWithRawResponse, + MessagesResourceWithStreamingResponse, + AsyncMessagesResourceWithStreamingResponse, +) + +__all__ = [ + "RunsResource", + "AsyncRunsResource", + "RunsResourceWithRawResponse", + "AsyncRunsResourceWithRawResponse", + "RunsResourceWithStreamingResponse", + "AsyncRunsResourceWithStreamingResponse", + "MessagesResource", + "AsyncMessagesResource", + "MessagesResourceWithRawResponse", + "AsyncMessagesResourceWithRawResponse", + "MessagesResourceWithStreamingResponse", + "AsyncMessagesResourceWithStreamingResponse", + "ThreadsResource", + "AsyncThreadsResource", + "ThreadsResourceWithRawResponse", + "AsyncThreadsResourceWithRawResponse", + "ThreadsResourceWithStreamingResponse", + "AsyncThreadsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/threads/messages.py b/src/digitalocean_genai_sdk/resources/threads/messages.py new file mode 100644 index 00000000..e62eb94c --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/threads/messages.py @@ -0,0 +1,654 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Iterable, Optional +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.threads import message_list_params, message_create_params, message_update_params +from ...types.threads.message_object import MessageObject +from ...types.threads.message_list_response import MessageListResponse +from ...types.threads.message_delete_response import MessageDeleteResponse + +__all__ = ["MessagesResource", "AsyncMessagesResource"] + + +class MessagesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> MessagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return MessagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> MessagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
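+
+        A usage sketch (the thread ID is hypothetical and a configured client is
+        assumed):
+
+            with client.threads.messages.with_streaming_response.list("thread_abc123") as response:
+                print(response.headers)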
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return MessagesResourceWithStreamingResponse(self) + + def create( + self, + thread_id: str, + *, + content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]], + role: Literal["user", "assistant"], + attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Create a message. + + Args: + content: The text contents of the message. + + role: + The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + + attachments: A list of files attached to the message, and the tools they should be added to. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._post( + f"/threads/{thread_id}/messages", + body=maybe_transform( + { + "content": content, + "role": role, + "attachments": attachments, + "metadata": metadata, + }, + message_create_params.MessageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + def retrieve( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Retrieve a message. 
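+
+        A minimal sketch (IDs are hypothetical; assumes a configured client):
+
+            message = client.threads.messages.retrieve(
+                "msg_abc123",
+                thread_id="thread_abc123",
+            )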
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return self._get( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + def update( + self, + message_id: str, + *, + thread_id: str, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Modifies a message. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return self._post( + f"/threads/{thread_id}/messages/{message_id}", + body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + run_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageListResponse: + """ + Returns a list of messages for a given thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. 
`before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + run_id: Filter messages by the run ID that generated them. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._get( + f"/threads/{thread_id}/messages", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + "run_id": run_id, + }, + message_list_params.MessageListParams, + ), + ), + cast_to=MessageListResponse, + ) + + def delete( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageDeleteResponse: + """ + Deletes a message. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return self._delete( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageDeleteResponse, + ) + + +class AsyncMessagesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncMessagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncMessagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncMessagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncMessagesResourceWithStreamingResponse(self) + + async def create( + self, + thread_id: str, + *, + content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]], + role: Literal["user", "assistant"], + attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Create a message. + + Args: + content: The text contents of the message. + + role: + The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + + attachments: A list of files attached to the message, and the tools they should be added to. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._post( + f"/threads/{thread_id}/messages", + body=await async_maybe_transform( + { + "content": content, + "role": role, + "attachments": attachments, + "metadata": metadata, + }, + message_create_params.MessageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + async def retrieve( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Retrieve a message. 
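+
+        A minimal sketch for the async client (IDs are hypothetical):
+
+            message = await client.threads.messages.retrieve(
+                "msg_abc123",
+                thread_id="thread_abc123",
+            )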
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return await self._get( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + async def update( + self, + message_id: str, + *, + thread_id: str, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageObject: + """ + Modifies a message. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return await self._post( + f"/threads/{thread_id}/messages/{message_id}", + body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageObject, + ) + + async def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + run_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageListResponse: + """ + Returns a list of messages for a given thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. 
+ + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + run_id: Filter messages by the run ID that generated them. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._get( + f"/threads/{thread_id}/messages", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + "run_id": run_id, + }, + message_list_params.MessageListParams, + ), + ), + cast_to=MessageListResponse, + ) + + async def delete( + self, + message_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> MessageDeleteResponse: + """ + Deletes a message. 
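+
+        A minimal sketch (IDs are hypothetical; assumes a configured async client):
+
+            deleted = await client.threads.messages.delete(
+                "msg_abc123",
+                thread_id="thread_abc123",
+            )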
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not message_id: + raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") + return await self._delete( + f"/threads/{thread_id}/messages/{message_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=MessageDeleteResponse, + ) + + +class MessagesResourceWithRawResponse: + def __init__(self, messages: MessagesResource) -> None: + self._messages = messages + + self.create = to_raw_response_wrapper( + messages.create, + ) + self.retrieve = to_raw_response_wrapper( + messages.retrieve, + ) + self.update = to_raw_response_wrapper( + messages.update, + ) + self.list = to_raw_response_wrapper( + messages.list, + ) + self.delete = to_raw_response_wrapper( + messages.delete, + ) + + +class AsyncMessagesResourceWithRawResponse: + def __init__(self, messages: AsyncMessagesResource) -> None: + self._messages = messages + + self.create = async_to_raw_response_wrapper( + messages.create, + ) + self.retrieve = async_to_raw_response_wrapper( + messages.retrieve, + ) + self.update = async_to_raw_response_wrapper( + messages.update, + ) + self.list = async_to_raw_response_wrapper( + messages.list, + ) + self.delete = async_to_raw_response_wrapper( + messages.delete, + ) + + +class MessagesResourceWithStreamingResponse: + def __init__(self, messages: MessagesResource) -> None: + self._messages = messages + + self.create = to_streamed_response_wrapper( + messages.create, + ) + self.retrieve = to_streamed_response_wrapper( + messages.retrieve, + ) + self.update = to_streamed_response_wrapper( + messages.update, + ) + self.list = to_streamed_response_wrapper( + messages.list, + ) + self.delete = to_streamed_response_wrapper( + messages.delete, + ) + + +class AsyncMessagesResourceWithStreamingResponse: + def __init__(self, messages: AsyncMessagesResource) -> None: + self._messages = messages + + self.create = async_to_streamed_response_wrapper( + messages.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + messages.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + messages.update, + ) + self.list = async_to_streamed_response_wrapper( + messages.list, + ) + self.delete = async_to_streamed_response_wrapper( + messages.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py b/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py new file mode 100644 index 00000000..70942400 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
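+#
+# A minimal navigation sketch (not part of the generated module; the attribute
+# paths, the `steps.list` signature, and the IDs are assumptions): runs hang off
+# a thread, and run steps hang off a run:
+#
+#     run = client.threads.runs.retrieve("run_abc123", thread_id="thread_abc123")
+#     steps = client.threads.runs.steps.list("run_abc123", thread_id="thread_abc123")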
+
+from .runs import (
+    RunsResource,
+    AsyncRunsResource,
+    RunsResourceWithRawResponse,
+    AsyncRunsResourceWithRawResponse,
+    RunsResourceWithStreamingResponse,
+    AsyncRunsResourceWithStreamingResponse,
+)
+from .steps import (
+    StepsResource,
+    AsyncStepsResource,
+    StepsResourceWithRawResponse,
+    AsyncStepsResourceWithRawResponse,
+    StepsResourceWithStreamingResponse,
+    AsyncStepsResourceWithStreamingResponse,
+)
+
+__all__ = [
+    "StepsResource",
+    "AsyncStepsResource",
+    "StepsResourceWithRawResponse",
+    "AsyncStepsResourceWithRawResponse",
+    "StepsResourceWithStreamingResponse",
+    "AsyncStepsResourceWithStreamingResponse",
+    "RunsResource",
+    "AsyncRunsResource",
+    "RunsResourceWithRawResponse",
+    "AsyncRunsResourceWithRawResponse",
+    "RunsResourceWithStreamingResponse",
+    "AsyncRunsResourceWithStreamingResponse",
+]
diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
new file mode 100644
index 00000000..a270b7a9
--- /dev/null
+++ b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py
@@ -0,0 +1,1426 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from .steps import (
+    StepsResource,
+    AsyncStepsResource,
+    StepsResourceWithRawResponse,
+    AsyncStepsResourceWithRawResponse,
+    StepsResourceWithStreamingResponse,
+    AsyncStepsResourceWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.threads import (
+    run_list_params,
+    run_create_params,
+    run_update_params,
+    run_create_run_params,
+    run_submit_tool_outputs_params,
+)
+from ....types.reasoning_effort import ReasoningEffort
+from ....types.threads.run_object import RunObject
+from ....types.threads.run_list_response import RunListResponse
+from ....types.assistant_supported_models import AssistantSupportedModels
+from ....types.create_thread_request_param import CreateThreadRequestParam
+from ....types.threads.truncation_object_param import TruncationObjectParam
+from ....types.threads.create_message_request_param import CreateMessageRequestParam
+from ....types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam
+from ....types.threads.assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam
+
+__all__ = ["RunsResource", "AsyncRunsResource"]
+
+
+class RunsResource(SyncAPIResource):
+    @cached_property
+    def steps(self) -> StepsResource:
+        return StepsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> RunsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
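+
+        For example (a sketch; IDs are hypothetical and a configured client is
+        assumed):
+
+            raw = client.threads.runs.with_raw_response.retrieve(
+                "run_abc123",
+                thread_id="thread_abc123",
+            )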
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return RunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return RunsResourceWithStreamingResponse(self) + + def create( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[ + str, + Literal[ + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + None, + ] + | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN, + tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, + tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Create a thread and run it in one request. + + Args: + assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. 
If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. If a value is provided here, it will override the model associated with the + assistant. If not, the model associated with the assistant will be used. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + tool_resources: A set of resources that are used by the assistant's tools. The resources are + specific to the type of tool. For example, the `code_interpreter` tool requires + a list of file IDs, while the `file_search` tool requires a list of vector store + IDs. + + tools: Override the tools the assistant can use for this run. This is useful for + modifying the behavior on a per-run basis. 
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/threads/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "thread": thread,
+                    "tool_choice": tool_choice,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RunObject,
+        )
+
+    def retrieve(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunObject:
+        """
+        Retrieves a run.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not run_id:
+            raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+        return self._get(
+            f"/threads/{thread_id}/runs/{run_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RunObject,
+        )
+
+    def update(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunObject:
+        """
+        Modifies a run.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+ + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return self._post( + f"/threads/{thread_id}/runs/{run_id}", + body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunListResponse: + """ + Returns a list of runs belonging to a thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._get( + f"/threads/{thread_id}/runs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + run_list_params.RunListParams, + ), + ), + cast_to=RunListResponse, + ) + + def cancel( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Cancels a run that is `in_progress`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return self._post( + f"/threads/{thread_id}/runs/{run_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, + additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Create a run. + + Args: + assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + + additional_instructions: Appends additional instructions at the end of the instructions for the run. This + is useful for modifying the behavior on a per-run basis without overriding other + instructions. + + additional_messages: Adds additional messages to the thread before creating the run. + + instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of + the assistant. 
This is useful for modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. If a value is provided here, it will override the model associated with the + assistant. If not, the model associated with the assistant will be used. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. + + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. 
`auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        return self._post(
+            f"/threads/{thread_id}/runs",
+            body=maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "additional_instructions": additional_instructions,
+                    "additional_messages": additional_messages,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                run_create_run_params.RunCreateRunParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams),
+            ),
+            cast_to=RunObject,
+        )
+
+    def submit_tool_outputs(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+        stream: Optional[bool] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunObject:
+        """
+        When a run has the `status: "requires_action"` and `required_action.type` is
+        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+        tool calls once they're all completed. All outputs must be submitted in a single
+        request.
+
+        Args:
+          tool_outputs: A list of tools for which the outputs are being submitted.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return self._post( + f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", + body=maybe_transform( + { + "tool_outputs": tool_outputs, + "stream": stream, + }, + run_submit_tool_outputs_params.RunSubmitToolOutputsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + +class AsyncRunsResource(AsyncAPIResource): + @cached_property + def steps(self) -> AsyncStepsResource: + return AsyncStepsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncRunsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncRunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncRunsResourceWithStreamingResponse(self) + + async def create( + self, + *, + assistant_id: str, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[ + str, + Literal[ + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + None, + ] + | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN, + tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, + tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Create a thread and run it in one request. + + Args: + assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + + instructions: Override the default system message of the assistant. This is useful for + modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. If a value is provided here, it will override the model associated with the + assistant. If not, the model associated with the assistant will be used. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + response_format: Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + + temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          thread: Options to create a new thread. If no thread is provided when running a request,
+              an empty thread will be created.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tool_resources: A set of resources that are used by the assistant's tools. The resources are
+              specific to the type of tool. For example, the `code_interpreter` tool requires
+              a list of file IDs, while the `file_search` tool requires a list of vector store
+              IDs.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/threads/runs",
+            body=await async_maybe_transform(
+                {
+                    "assistant_id": assistant_id,
+                    "instructions": instructions,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_prompt_tokens": max_prompt_tokens,
+                    "metadata": metadata,
+                    "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "response_format": response_format,
+                    "stream": stream,
+                    "temperature": temperature,
+                    "thread": thread,
+                    "tool_choice": tool_choice,
+                    "tool_resources": tool_resources,
+                    "tools": tools,
+                    "top_p": top_p,
+                    "truncation_strategy": truncation_strategy,
+                },
+                run_create_params.RunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=RunObject,
+        )
+
+    async def retrieve(
+        self,
+        run_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RunObject:
+        """
+        Retrieves a run.
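+
+        For example, a minimal sketch (IDs are illustrative)::
+
+            run = await client.threads.runs.retrieve("run_123", thread_id="thread_123")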
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return await self._get( + f"/threads/{thread_id}/runs/{run_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + async def update( + self, + run_id: str, + *, + thread_id: str, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Modifies a run. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return await self._post( + f"/threads/{thread_id}/runs/{run_id}", + body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + async def list( + self, + thread_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunListResponse: + """ + Returns a list of runs belonging to a thread. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._get( + f"/threads/{thread_id}/runs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + run_list_params.RunListParams, + ), + ), + cast_to=RunListResponse, + ) + + async def cancel( + self, + run_id: str, + *, + thread_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Cancels a run that is `in_progress`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return await self._post( + f"/threads/{thread_id}/runs/{run_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, + additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN, + instructions: Optional[str] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, + tools: Optional[Iterable[run_create_run_params.Tool]] | 
NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + Create a run. + + Args: + assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + + additional_instructions: Appends additional instructions at the end of the instructions for the run. This + is useful for modifying the behavior on a per-run basis without overriding other + instructions. + + additional_messages: Adds additional messages to the thread before creating the run. + + instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of + the assistant. This is useful for modifying the behavior on a per-run basis. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + model: The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. If a value is provided here, it will override the model associated with the + assistant. If not, the model associated with the assistant will be used. + + parallel_tool_calls: Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + + reasoning_effort: **o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: Specifies the format that the model must output. 
Compatible with
+              [GPT-4o](/docs/models#gpt-4o),
+              [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
+              since `gpt-3.5-turbo-1106`.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in a long-running and seemingly "stuck" request. Also note that
+              the message content may be partially cut off if `finish_reason="length"`, which
+              indicates the generation exceeded `max_tokens` or the conversation exceeded the
+              max context length.
+
+          stream: If `true`, returns a stream of events that happen during the Run as server-sent
+              events, terminating when the Run enters a terminal state with a `data: [DONE]`
+              message.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tools and instead generates a message. `auto` is the default value
+              and means the model can pick between generating a message or calling one or more
+              tools. `required` means the model must call one or more tools before responding
+              to the user. Specifying a particular tool like `{"type": "file_search"}` or
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+          tools: Override the tools the assistant can use for this run. This is useful for
+              modifying the behavior on a per-run basis.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or temperature but not both.
+
+          truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
+              control the initial context window of the run.
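+
+          A minimal call sketch (the IDs and instruction text are illustrative)::
+
+              run = await client.threads.runs.create_run(
+                  "thread_123",
+                  assistant_id="asst_123",
+                  instructions="Answer concisely.",
+              )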
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._post( + f"/threads/{thread_id}/runs", + body=await async_maybe_transform( + { + "assistant_id": assistant_id, + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "parallel_tool_calls": parallel_tool_calls, + "reasoning_effort": reasoning_effort, + "response_format": response_format, + "stream": stream, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + }, + run_create_run_params.RunCreateRunParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams), + ), + cast_to=RunObject, + ) + + async def submit_tool_outputs( + self, + run_id: str, + *, + thread_id: str, + tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], + stream: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunObject: + """ + When a run has the `status: "requires_action"` and `required_action.type` is + `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + tool calls once they're all completed. All outputs must be submitted in a single + request. + + Args: + tool_outputs: A list of tools for which the outputs are being submitted. + + stream: If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return await self._post( + f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", + body=await async_maybe_transform( + { + "tool_outputs": tool_outputs, + "stream": stream, + }, + run_submit_tool_outputs_params.RunSubmitToolOutputsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RunObject, + ) + + +class RunsResourceWithRawResponse: + def __init__(self, runs: RunsResource) -> None: + self._runs = runs + + self.create = to_raw_response_wrapper( + runs.create, + ) + self.retrieve = to_raw_response_wrapper( + runs.retrieve, + ) + self.update = to_raw_response_wrapper( + runs.update, + ) + self.list = to_raw_response_wrapper( + runs.list, + ) + self.cancel = to_raw_response_wrapper( + runs.cancel, + ) + self.create_run = to_raw_response_wrapper( + runs.create_run, + ) + self.submit_tool_outputs = to_raw_response_wrapper( + runs.submit_tool_outputs, + ) + + @cached_property + def steps(self) -> StepsResourceWithRawResponse: + return StepsResourceWithRawResponse(self._runs.steps) + + +class AsyncRunsResourceWithRawResponse: + def __init__(self, runs: AsyncRunsResource) -> None: + self._runs = runs + + self.create = async_to_raw_response_wrapper( + runs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + runs.retrieve, + ) + self.update = async_to_raw_response_wrapper( + runs.update, + ) + self.list = async_to_raw_response_wrapper( + runs.list, + ) + self.cancel = async_to_raw_response_wrapper( + runs.cancel, + ) + self.create_run = async_to_raw_response_wrapper( + runs.create_run, + ) + self.submit_tool_outputs = async_to_raw_response_wrapper( + runs.submit_tool_outputs, + ) + + @cached_property + def steps(self) -> AsyncStepsResourceWithRawResponse: + return AsyncStepsResourceWithRawResponse(self._runs.steps) + + +class RunsResourceWithStreamingResponse: + def __init__(self, runs: RunsResource) -> None: + self._runs = runs + + self.create = to_streamed_response_wrapper( + runs.create, + ) + self.retrieve = to_streamed_response_wrapper( + runs.retrieve, + ) + self.update = to_streamed_response_wrapper( + runs.update, + ) + self.list = to_streamed_response_wrapper( + runs.list, + ) + self.cancel = to_streamed_response_wrapper( + runs.cancel, + ) + self.create_run = to_streamed_response_wrapper( + runs.create_run, + ) + self.submit_tool_outputs = to_streamed_response_wrapper( + runs.submit_tool_outputs, + ) + + @cached_property + def steps(self) -> StepsResourceWithStreamingResponse: + return StepsResourceWithStreamingResponse(self._runs.steps) + + +class AsyncRunsResourceWithStreamingResponse: + def __init__(self, runs: AsyncRunsResource) -> None: + self._runs = runs + + self.create = async_to_streamed_response_wrapper( + runs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + runs.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + runs.update, + ) + self.list = async_to_streamed_response_wrapper( + runs.list, + ) + self.cancel = async_to_streamed_response_wrapper( + runs.cancel, 
+ ) + self.create_run = async_to_streamed_response_wrapper( + runs.create_run, + ) + self.submit_tool_outputs = async_to_streamed_response_wrapper( + runs.submit_tool_outputs, + ) + + @cached_property + def steps(self) -> AsyncStepsResourceWithStreamingResponse: + return AsyncStepsResourceWithStreamingResponse(self._runs.steps) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py new file mode 100644 index 00000000..2b5ffd09 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py @@ -0,0 +1,375 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.threads.runs import step_list_params, step_retrieve_params +from ....types.threads.runs.run_step_object import RunStepObject +from ....types.threads.runs.step_list_response import StepListResponse + +__all__ = ["StepsResource", "AsyncStepsResource"] + + +class StepsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> StepsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return StepsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> StepsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return StepsResourceWithStreamingResponse(self) + + def retrieve( + self, + step_id: str, + *, + thread_id: str, + run_id: str, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunStepObject: + """ + Retrieves a run step. + + Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
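+
+          A minimal call sketch (IDs are illustrative; the `include` value is the one
+          literal the signature currently supports)::
+
+              step = client.threads.runs.steps.retrieve(
+                  "step_123",
+                  thread_id="thread_123",
+                  run_id="run_123",
+                  include=["step_details.tool_calls[*].file_search.results[*].content"],
+              )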
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + if not step_id: + raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") + return self._get( + f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), + ), + cast_to=RunStepObject, + ) + + def list( + self, + run_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> StepListResponse: + """ + Returns a list of run steps belonging to a run. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
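+
+          A minimal cursor-pagination sketch (the IDs, including the `after` cursor,
+          are illustrative)::
+
+              steps = client.threads.runs.steps.list(
+                  "run_123",
+                  thread_id="thread_123",
+                  limit=20,
+                  order="desc",
+                  after="step_122",  # an object ID taken from a previous page
+              )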
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return self._get( + f"/threads/{thread_id}/runs/{run_id}/steps", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "include": include, + "limit": limit, + "order": order, + }, + step_list_params.StepListParams, + ), + ), + cast_to=StepListResponse, + ) + + +class AsyncStepsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncStepsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncStepsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncStepsResourceWithStreamingResponse(self) + + async def retrieve( + self, + step_id: str, + *, + thread_id: str, + run_id: str, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RunStepObject: + """ + Retrieves a run step. + + Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + if not step_id: + raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") + return await self._get( + f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), + ), + cast_to=RunStepObject, + ) + + async def list( + self, + run_id: str, + *, + thread_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> StepListResponse: + """ + Returns a list of run steps belonging to a run. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + if not run_id: + raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") + return await self._get( + f"/threads/{thread_id}/runs/{run_id}/steps", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "include": include, + "limit": limit, + "order": order, + }, + step_list_params.StepListParams, + ), + ), + cast_to=StepListResponse, + ) + + +class StepsResourceWithRawResponse: + def __init__(self, steps: StepsResource) -> None: + self._steps = steps + + self.retrieve = to_raw_response_wrapper( + steps.retrieve, + ) + self.list = to_raw_response_wrapper( + steps.list, + ) + + +class AsyncStepsResourceWithRawResponse: + def __init__(self, steps: AsyncStepsResource) -> None: + self._steps = steps + + self.retrieve = async_to_raw_response_wrapper( + steps.retrieve, + ) + self.list = async_to_raw_response_wrapper( + steps.list, + ) + + +class StepsResourceWithStreamingResponse: + def __init__(self, steps: StepsResource) -> None: + self._steps = steps + + self.retrieve = to_streamed_response_wrapper( + steps.retrieve, + ) + self.list = to_streamed_response_wrapper( + steps.list, + ) + + +class AsyncStepsResourceWithStreamingResponse: + def __init__(self, steps: AsyncStepsResource) -> None: + self._steps = steps + + self.retrieve = async_to_streamed_response_wrapper( + steps.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + steps.list, + ) diff --git a/src/digitalocean_genai_sdk/resources/threads/threads.py b/src/digitalocean_genai_sdk/resources/threads/threads.py new file mode 100644 index 00000000..64062ffb --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/threads/threads.py @@ -0,0 +1,553 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
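+#
+# A minimal end-to-end sketch of this resource (assumes an instantiated client;
+# the message payload shape and the IDs below are illustrative assumptions):
+#
+#     thread = client.threads.create(
+#         messages=[{"role": "user", "content": "Hello!"}],
+#     )
+#     thread = client.threads.update(thread.id, metadata={"user": "user_123"})
+#     client.threads.delete(thread.id)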
+ +from __future__ import annotations + +from typing import Dict, Iterable, Optional + +import httpx + +from ...types import thread_create_params, thread_update_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from .messages import ( + MessagesResource, + AsyncMessagesResource, + MessagesResourceWithRawResponse, + AsyncMessagesResourceWithRawResponse, + MessagesResourceWithStreamingResponse, + AsyncMessagesResourceWithStreamingResponse, +) +from ..._compat import cached_property +from .runs.runs import ( + RunsResource, + AsyncRunsResource, + RunsResourceWithRawResponse, + AsyncRunsResourceWithRawResponse, + RunsResourceWithStreamingResponse, + AsyncRunsResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.thread_object import ThreadObject +from ...types.thread_delete_response import ThreadDeleteResponse +from ...types.threads.create_message_request_param import CreateMessageRequestParam + +__all__ = ["ThreadsResource", "AsyncThreadsResource"] + + +class ThreadsResource(SyncAPIResource): + @cached_property + def runs(self) -> RunsResource: + return RunsResource(self._client) + + @cached_property + def messages(self) -> MessagesResource: + return MessagesResource(self._client) + + @cached_property + def with_raw_response(self) -> ThreadsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ThreadsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ThreadsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ThreadsResourceWithStreamingResponse(self) + + def create( + self, + *, + messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Create a thread. + + Args: + messages: A list of [messages](/docs/api-reference/messages) to start the thread with. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ + tool_resources: A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/threads", + body=maybe_transform( + { + "messages": messages, + "metadata": metadata, + "tool_resources": tool_resources, + }, + thread_create_params.ThreadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Retrieves a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._get( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + def update( + self, + thread_id: str, + *, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Modifies a thread. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + tool_resources: A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. 
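+
+          A minimal call sketch (IDs are illustrative, and the nested field names are
+          assumed from the tool descriptions above)::
+
+              thread = client.threads.update(
+                  "thread_123",
+                  tool_resources={
+                      "code_interpreter": {"file_ids": ["file_123"]},
+                      "file_search": {"vector_store_ids": ["vs_123"]},
+                  },
+              )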
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._post( + f"/threads/{thread_id}", + body=maybe_transform( + { + "metadata": metadata, + "tool_resources": tool_resources, + }, + thread_update_params.ThreadUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadDeleteResponse: + """ + Delete a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return self._delete( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleteResponse, + ) + + +class AsyncThreadsResource(AsyncAPIResource): + @cached_property + def runs(self) -> AsyncRunsResource: + return AsyncRunsResource(self._client) + + @cached_property + def messages(self) -> AsyncMessagesResource: + return AsyncMessagesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncThreadsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncThreadsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncThreadsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncThreadsResourceWithStreamingResponse(self) + + async def create( + self, + *, + messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Create a thread. + + Args: + messages: A list of [messages](/docs/api-reference/messages) to start the thread with. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + tool_resources: A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/threads", + body=await async_maybe_transform( + { + "messages": messages, + "metadata": metadata, + "tool_resources": tool_resources, + }, + thread_create_params.ThreadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + async def retrieve( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Retrieves a thread. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._get( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + async def update( + self, + thread_id: str, + *, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadObject: + """ + Modifies a thread. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + tool_resources: A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._post( + f"/threads/{thread_id}", + body=await async_maybe_transform( + { + "metadata": metadata, + "tool_resources": tool_resources, + }, + thread_update_params.ThreadUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadObject, + ) + + async def delete( + self, + thread_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ThreadDeleteResponse: + """ + Delete a thread. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not thread_id: + raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") + return await self._delete( + f"/threads/{thread_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ThreadDeleteResponse, + ) + + +class ThreadsResourceWithRawResponse: + def __init__(self, threads: ThreadsResource) -> None: + self._threads = threads + + self.create = to_raw_response_wrapper( + threads.create, + ) + self.retrieve = to_raw_response_wrapper( + threads.retrieve, + ) + self.update = to_raw_response_wrapper( + threads.update, + ) + self.delete = to_raw_response_wrapper( + threads.delete, + ) + + @cached_property + def runs(self) -> RunsResourceWithRawResponse: + return RunsResourceWithRawResponse(self._threads.runs) + + @cached_property + def messages(self) -> MessagesResourceWithRawResponse: + return MessagesResourceWithRawResponse(self._threads.messages) + + +class AsyncThreadsResourceWithRawResponse: + def __init__(self, threads: AsyncThreadsResource) -> None: + self._threads = threads + + self.create = async_to_raw_response_wrapper( + threads.create, + ) + self.retrieve = async_to_raw_response_wrapper( + threads.retrieve, + ) + self.update = async_to_raw_response_wrapper( + threads.update, + ) + self.delete = async_to_raw_response_wrapper( + threads.delete, + ) + + @cached_property + def runs(self) -> AsyncRunsResourceWithRawResponse: + return AsyncRunsResourceWithRawResponse(self._threads.runs) + + @cached_property + def messages(self) -> AsyncMessagesResourceWithRawResponse: + return AsyncMessagesResourceWithRawResponse(self._threads.messages) + + +class ThreadsResourceWithStreamingResponse: + def __init__(self, threads: ThreadsResource) -> None: + self._threads = threads + + self.create = to_streamed_response_wrapper( + threads.create, + ) + self.retrieve = to_streamed_response_wrapper( + threads.retrieve, + ) + self.update = to_streamed_response_wrapper( + threads.update, + ) + self.delete = to_streamed_response_wrapper( + threads.delete, + ) + + @cached_property + def runs(self) -> RunsResourceWithStreamingResponse: + return RunsResourceWithStreamingResponse(self._threads.runs) + + @cached_property + def messages(self) -> MessagesResourceWithStreamingResponse: + return MessagesResourceWithStreamingResponse(self._threads.messages) + + +class AsyncThreadsResourceWithStreamingResponse: + def __init__(self, threads: AsyncThreadsResource) -> None: + self._threads = threads + + self.create = async_to_streamed_response_wrapper( + threads.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + threads.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + threads.update, + ) + self.delete = async_to_streamed_response_wrapper( + threads.delete, + ) + + @cached_property + def runs(self) -> AsyncRunsResourceWithStreamingResponse: + return AsyncRunsResourceWithStreamingResponse(self._threads.runs) + + @cached_property + def messages(self) -> AsyncMessagesResourceWithStreamingResponse: + return AsyncMessagesResourceWithStreamingResponse(self._threads.messages) diff --git a/src/digitalocean_genai_sdk/resources/uploads.py b/src/digitalocean_genai_sdk/resources/uploads.py new file mode 100644 
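# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the generated diff) of
# driving the threads resource defined above. The client class name
# `AsyncDigitaloceanGenaiSDK` is an assumption based on the package name in
# `_client.py`; substitute whatever the package actually exports. The `.id`
# and `.deleted` fields on the returned objects are likewise assumed.
# ---------------------------------------------------------------------------
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # assumed export


async def thread_lifecycle() -> None:
    client = AsyncDigitaloceanGenaiSDK()  # credentials read from the environment

    # Create an empty thread, attach metadata via update (at most 16 pairs;
    # keys up to 64 chars, values up to 512 chars), then delete it.
    thread = await client.threads.create()
    thread = await client.threads.update(thread.id, metadata={"user": "abc123"})
    deleted = await client.threads.delete(thread.id)
    print(deleted.deleted)  # assumed boolean field on ThreadDeleteResponse


asyncio.run(thread_lifecycle())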
index 00000000..30ba91b5 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/uploads.py @@ -0,0 +1,573 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Mapping, cast +from typing_extensions import Literal + +import httpx + +from ..types import upload_create_params, upload_add_part_params, upload_complete_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.upload import Upload +from ..types.upload_add_part_response import UploadAddPartResponse + +__all__ = ["UploadsResource", "AsyncUploadsResource"] + + +class UploadsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> UploadsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return UploadsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> UploadsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return UploadsResourceWithStreamingResponse(self) + + def create( + self, + *, + bytes: int, + filename: str, + mime_type: str, + purpose: Literal["assistants", "batch", "fine-tune", "vision"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that + you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an + Upload can accept at most 8 GB in total and expires an hour after you + create it. + + Once you complete the Upload, we will create a + [File](/docs/api-reference/files/object) object that contains all the parts you + uploaded. This File is usable in the rest of our platform as a regular File + object. + + For certain `purpose` values, the correct `mime_type` must be specified. Please + refer to the documentation for the + [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). + + For guidance on the proper filename extensions for each purpose, please follow + the documentation on [creating a File](/docs/api-reference/files/create). + + Args: + bytes: The number of bytes in the file you are uploading. + + filename: The name of the file to upload. + + mime_type: The MIME type of the file. + + This must fall within the supported MIME types for your file purpose.
See the + supported MIME types for assistants and vision. + + purpose: The intended purpose of the uploaded file. + + See the + [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/uploads", + body=maybe_transform( + { + "bytes": bytes, + "filename": filename, + "mime_type": mime_type, + "purpose": purpose, + }, + upload_create_params.UploadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + def add_part( + self, + upload_id: str, + *, + data: FileTypes, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UploadAddPartResponse: + """ + Adds a [Part](/docs/api-reference/uploads/part-object) to an + [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk + of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + maximum of 8 GB. + + It is possible to add multiple Parts in parallel. You can decide the intended + order of the Parts when you + [complete the Upload](/docs/api-reference/uploads/complete). + + Args: + data: The chunk of bytes for this Part. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + body = deepcopy_minimal({"data": data}) + files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + f"/uploads/{upload_id}/parts", + body=maybe_transform(body, upload_add_part_params.UploadAddPartParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UploadAddPartResponse, + ) + + def cancel( + self, + upload_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """Cancels the Upload. + + No Parts may be added after an Upload is cancelled. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return self._post( + f"/uploads/{upload_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + def complete( + self, + upload_id: str, + *, + part_ids: List[str], + md5: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested + [File](/docs/api-reference/files/object) object that is ready to use in the rest + of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part + IDs. + + The number of bytes uploaded upon completion must match the number of bytes + initially specified when creating the Upload object. No Parts may be added after + an Upload is completed. + + Args: + part_ids: The ordered list of Part IDs. + + md5: The optional md5 checksum for the file contents to verify that the bytes uploaded + match what you expect. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return self._post( + f"/uploads/{upload_id}/complete", + body=maybe_transform( + { + "part_ids": part_ids, + "md5": md5, + }, + upload_complete_params.UploadCompleteParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + +class AsyncUploadsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncUploadsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncUploadsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncUploadsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncUploadsResourceWithStreamingResponse(self) + + async def create( + self, + *, + bytes: int, + filename: str, + mime_type: str, + purpose: Literal["assistants", "batch", "fine-tune", "vision"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that + you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an + Upload can accept at most 8 GB in total and expires an hour after you + create it. + + Once you complete the Upload, we will create a + [File](/docs/api-reference/files/object) object that contains all the parts you + uploaded. This File is usable in the rest of our platform as a regular File + object. + + For certain `purpose` values, the correct `mime_type` must be specified. Please + refer to the documentation for the + [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). + + For guidance on the proper filename extensions for each purpose, please follow + the documentation on [creating a File](/docs/api-reference/files/create). + + Args: + bytes: The number of bytes in the file you are uploading. + + filename: The name of the file to upload. + + mime_type: The MIME type of the file. + + This must fall within the supported MIME types for your file purpose. See the + supported MIME types for assistants and vision. + + purpose: The intended purpose of the uploaded file. + + See the + [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/uploads", + body=await async_maybe_transform( + { + "bytes": bytes, + "filename": filename, + "mime_type": mime_type, + "purpose": purpose, + }, + upload_create_params.UploadCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + async def add_part( + self, + upload_id: str, + *, + data: FileTypes, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> UploadAddPartResponse: + """ + Adds a [Part](/docs/api-reference/uploads/part-object) to an + [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk + of bytes from the file you are trying to upload. + + Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + maximum of 8 GB. + + It is possible to add multiple Parts in parallel.
You can decide the intended + order of the Parts when you + [complete the Upload](/docs/api-reference/uploads/complete). + + Args: + data: The chunk of bytes for this Part. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + body = deepcopy_minimal({"data": data}) + files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + f"/uploads/{upload_id}/parts", + body=await async_maybe_transform(body, upload_add_part_params.UploadAddPartParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=UploadAddPartResponse, + ) + + async def cancel( + self, + upload_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """Cancels the Upload. + + No Parts may be added after an Upload is cancelled. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return await self._post( + f"/uploads/{upload_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + async def complete( + self, + upload_id: str, + *, + part_ids: List[str], + md5: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Upload: + """ + Completes the [Upload](/docs/api-reference/uploads/object). + + Within the returned Upload object, there is a nested + [File](/docs/api-reference/files/object) object that is ready to use in the rest + of the platform. + + You can specify the order of the Parts by passing in an ordered list of the Part + IDs. + + The number of bytes uploaded upon completion must match the number of bytes + initially specified when creating the Upload object. No Parts may be added after + an Upload is completed. + + Args: + part_ids: The ordered list of Part IDs. 
+ + md5: The optional md5 checksum for the file contents to verify that the bytes uploaded + match what you expect. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not upload_id: + raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") + return await self._post( + f"/uploads/{upload_id}/complete", + body=await async_maybe_transform( + { + "part_ids": part_ids, + "md5": md5, + }, + upload_complete_params.UploadCompleteParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Upload, + ) + + +class UploadsResourceWithRawResponse: + def __init__(self, uploads: UploadsResource) -> None: + self._uploads = uploads + + self.create = to_raw_response_wrapper( + uploads.create, + ) + self.add_part = to_raw_response_wrapper( + uploads.add_part, + ) + self.cancel = to_raw_response_wrapper( + uploads.cancel, + ) + self.complete = to_raw_response_wrapper( + uploads.complete, + ) + + +class AsyncUploadsResourceWithRawResponse: + def __init__(self, uploads: AsyncUploadsResource) -> None: + self._uploads = uploads + + self.create = async_to_raw_response_wrapper( + uploads.create, + ) + self.add_part = async_to_raw_response_wrapper( + uploads.add_part, + ) + self.cancel = async_to_raw_response_wrapper( + uploads.cancel, + ) + self.complete = async_to_raw_response_wrapper( + uploads.complete, + ) + + +class UploadsResourceWithStreamingResponse: + def __init__(self, uploads: UploadsResource) -> None: + self._uploads = uploads + + self.create = to_streamed_response_wrapper( + uploads.create, + ) + self.add_part = to_streamed_response_wrapper( + uploads.add_part, + ) + self.cancel = to_streamed_response_wrapper( + uploads.cancel, + ) + self.complete = to_streamed_response_wrapper( + uploads.complete, + ) + + +class AsyncUploadsResourceWithStreamingResponse: + def __init__(self, uploads: AsyncUploadsResource) -> None: + self._uploads = uploads + + self.create = async_to_streamed_response_wrapper( + uploads.create, + ) + self.add_part = async_to_streamed_response_wrapper( + uploads.add_part, + ) + self.cancel = async_to_streamed_response_wrapper( + uploads.cancel, + ) + self.complete = async_to_streamed_response_wrapper( + uploads.complete, + ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py b/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py new file mode 100644 index 00000000..a754f147 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
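# ---------------------------------------------------------------------------
# Editor's note: an illustrative end-to-end sketch of the uploads flow defined
# above (create -> add_part -> complete). The client class name, the MIME type
# string, and the `.id` fields on the returned objects are assumptions, not
# verified API; parts may be up to 64 MB each with an 8 GB total, and
# `part_ids` fixes the order in which the Parts are stitched into the File.
# ---------------------------------------------------------------------------
import os

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export

client = DigitaloceanGenaiSDK()

path = "training.jsonl"  # hypothetical local file
upload = client.uploads.create(
    bytes=os.path.getsize(path),
    filename=os.path.basename(path),
    mime_type="application/jsonl",  # must be valid for the chosen purpose
    purpose="fine-tune",
)

# Split the file into Parts of at most 64 MB each; Parts could also be
# uploaded in parallel, since ordering is only decided at completion.
part_ids: list[str] = []
with open(path, "rb") as f:
    while chunk := f.read(64 * 1024 * 1024):
        part = client.uploads.add_part(upload.id, data=chunk)
        part_ids.append(part.id)

completed = client.uploads.complete(upload.id, part_ids=part_ids)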
+ +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from .file_batches import ( + FileBatchesResource, + AsyncFileBatchesResource, + FileBatchesResourceWithRawResponse, + AsyncFileBatchesResourceWithRawResponse, + FileBatchesResourceWithStreamingResponse, + AsyncFileBatchesResourceWithStreamingResponse, +) +from .vector_stores import ( + VectorStoresResource, + AsyncVectorStoresResource, + VectorStoresResourceWithRawResponse, + AsyncVectorStoresResourceWithRawResponse, + VectorStoresResourceWithStreamingResponse, + AsyncVectorStoresResourceWithStreamingResponse, +) + +__all__ = [ + "FileBatchesResource", + "AsyncFileBatchesResource", + "FileBatchesResourceWithRawResponse", + "AsyncFileBatchesResourceWithRawResponse", + "FileBatchesResourceWithStreamingResponse", + "AsyncFileBatchesResourceWithStreamingResponse", + "FilesResource", + "AsyncFilesResource", + "FilesResourceWithRawResponse", + "AsyncFilesResourceWithRawResponse", + "FilesResourceWithStreamingResponse", + "AsyncFilesResourceWithStreamingResponse", + "VectorStoresResource", + "AsyncVectorStoresResource", + "VectorStoresResourceWithRawResponse", + "AsyncVectorStoresResourceWithRawResponse", + "VectorStoresResourceWithStreamingResponse", + "AsyncVectorStoresResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py new file mode 100644 index 00000000..0c4334ce --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py @@ -0,0 +1,544 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.vector_stores import ChunkingStrategyRequestParam, file_batch_create_params, file_batch_list_files_params +from ...types.vector_stores.vector_store_file_batch_object import VectorStoreFileBatchObject +from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam +from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse + +__all__ = ["FileBatchesResource", "AsyncFileBatchesResource"] + + +class FileBatchesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FileBatchesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return FileBatchesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return FileBatchesResourceWithStreamingResponse(self) + + def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, + chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """ + Create a vector store file batch. + + Args: + file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should + use. Useful for tools like `file_search` that can access files. + + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}/file_batches", + body=maybe_transform( + { + "file_ids": file_ids, + "attributes": attributes, + "chunking_strategy": chunking_strategy, + }, + file_batch_create_params.FileBatchCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + def retrieve( + self, + batch_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """ + Retrieves a vector store file batch. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + def cancel( + self, + batch_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """Cancel a vector store file batch. + + This attempts to cancel the processing of + files in this batch as soon as possible. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + def list_files( + self, + batch_id: str, + *, + vector_store_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListVectorStoreFilesResponse: + """ + Returns a list of vector store files in a batch. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_batch_list_files_params.FileBatchListFilesParams, + ), + ), + cast_to=ListVectorStoreFilesResponse, + ) + + +class AsyncFileBatchesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncFileBatchesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncFileBatchesResourceWithStreamingResponse(self) + + async def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, + chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """ + Create a vector store file batch. + + Args: + file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should + use. Useful for tools like `file_search` that can access files. + + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. 
Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}/file_batches", + body=await async_maybe_transform( + { + "file_ids": file_ids, + "attributes": attributes, + "chunking_strategy": chunking_strategy, + }, + file_batch_create_params.FileBatchCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + async def retrieve( + self, + batch_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """ + Retrieves a vector store file batch. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + async def cancel( + self, + batch_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileBatchObject: + """Cancel a vector store file batch. + + This attempts to cancel the processing of + files in this batch as soon as possible. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileBatchObject, + ) + + async def list_files( + self, + batch_id: str, + *, + vector_store_id: str, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListVectorStoreFilesResponse: + """ + Returns a list of vector store files in a batch. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not batch_id: + raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_batch_list_files_params.FileBatchListFilesParams, + ), + ), + cast_to=ListVectorStoreFilesResponse, + ) + + +class FileBatchesResourceWithRawResponse: + def __init__(self, file_batches: FileBatchesResource) -> None: + self._file_batches = file_batches + + self.create = to_raw_response_wrapper( + file_batches.create, + ) + self.retrieve = to_raw_response_wrapper( + file_batches.retrieve, + ) + self.cancel = to_raw_response_wrapper( + file_batches.cancel, + ) + self.list_files = to_raw_response_wrapper( + file_batches.list_files, + ) + + +class AsyncFileBatchesResourceWithRawResponse: + def __init__(self, file_batches: AsyncFileBatchesResource) -> None: + self._file_batches = file_batches + + self.create = async_to_raw_response_wrapper( + file_batches.create, + ) + self.retrieve = async_to_raw_response_wrapper( + file_batches.retrieve, + ) + self.cancel = async_to_raw_response_wrapper( + file_batches.cancel, + ) + self.list_files = async_to_raw_response_wrapper( + file_batches.list_files, + ) + + +class FileBatchesResourceWithStreamingResponse: + def __init__(self, file_batches: FileBatchesResource) -> None: + self._file_batches = file_batches + + self.create = to_streamed_response_wrapper( + file_batches.create, + ) + self.retrieve = to_streamed_response_wrapper( + file_batches.retrieve, + ) + self.cancel = to_streamed_response_wrapper( + file_batches.cancel, + ) + self.list_files = to_streamed_response_wrapper( + file_batches.list_files, + ) + + +class AsyncFileBatchesResourceWithStreamingResponse: + def __init__(self, file_batches: AsyncFileBatchesResource) -> None: + self._file_batches = file_batches + + self.create = async_to_streamed_response_wrapper( + file_batches.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + file_batches.retrieve, + ) + self.cancel = async_to_streamed_response_wrapper( + file_batches.cancel, + ) + self.list_files = async_to_streamed_response_wrapper( + file_batches.list_files, + ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/files.py b/src/digitalocean_genai_sdk/resources/vector_stores/files.py new file mode 100644 index 00000000..c40d5b11 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/vector_stores/files.py @@ -0,0 +1,733 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
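# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch of the file-batches resource defined
# above: create a batch from existing File IDs, poll it until it finishes,
# then list its files. The client class name, the `vector_stores.file_batches`
# resource path, and the `.id`/`.status` fields on the batch object are
# assumptions based on the package layout.
# ---------------------------------------------------------------------------
import time

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export

client = DigitaloceanGenaiSDK()

batch = client.vector_stores.file_batches.create(
    "vs_abc123",  # hypothetical vector store ID
    file_ids=["file-1", "file-2"],
)

while batch.status == "in_progress":  # status values assumed to mirror `filter`
    time.sleep(1)
    batch = client.vector_stores.file_batches.retrieve(
        batch.id, vector_store_id="vs_abc123"
    )

# Page through the batch's files; pass the last returned ID back as `after`
# to fetch the next page of up to `limit` objects.
page = client.vector_stores.file_batches.list_files(
    batch.id, vector_store_id="vs_abc123", filter="completed", limit=20
)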
+ +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.vector_stores import ( + ChunkingStrategyRequestParam, + file_list_params, + file_create_params, + file_update_params, +) +from ...types.vector_stores.file_delete_response import FileDeleteResponse +from ...types.vector_stores.vector_store_file_object import VectorStoreFileObject +from ...types.vector_stores.file_retrieve_content_response import FileRetrieveContentResponse +from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam +from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse + +__all__ = ["FilesResource", "AsyncFilesResource"] + + +class FilesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return FilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return FilesResourceWithStreamingResponse(self) + + def create( + self, + vector_store_id: str, + *, + file_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, + chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Create a vector store file by attaching a [File](/docs/api-reference/files) to a + [vector store](/docs/api-reference/vector-stores/object). + + Args: + file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful + for tools like `file_search` that can access files. + + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}/files", + body=maybe_transform( + { + "file_id": file_id, + "attributes": attributes, + "chunking_strategy": chunking_strategy, + }, + file_create_params.FileCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Retrieves a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Update attributes on a vector store file. + + Args: + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}/files/{file_id}", + body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + def list( + self, + vector_store_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListVectorStoreFilesResponse: + """ + Returns a list of vector store files. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + cast_to=ListVectorStoreFilesResponse, + ) + + def delete( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """Delete a vector store file. + + This will remove the file from the vector store but + the file itself will not be deleted. To delete the file, use the + [delete file](/docs/api-reference/files/delete) endpoint. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._delete( + f"/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + def retrieve_content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileRetrieveContentResponse: + """ + Retrieve the parsed contents of a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileRetrieveContentResponse, + ) + + +class AsyncFilesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncFilesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
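+
+        A rough usage sketch (assuming `client` is an instance of this SDK's
+        async client; the IDs are illustrative):
+
+            async with client.vector_stores.files.with_streaming_response.retrieve(
+                "file-abc123",
+                vector_store_id="vs_abc123",
+            ) as response:
+                # headers are available without reading the body
+                print(response.headers.get("content-type"))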
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncFilesResourceWithStreamingResponse(self) + + async def create( + self, + vector_store_id: str, + *, + file_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, + chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Create a vector store file by attaching a [File](/docs/api-reference/files) to a + [vector store](/docs/api-reference/vector-stores/object). + + Args: + file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful + for tools like `file_search` that can access files. + + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}/files", + body=await async_maybe_transform( + { + "file_id": file_id, + "attributes": attributes, + "chunking_strategy": chunking_strategy, + }, + file_create_params.FileCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + async def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Retrieves a vector store file. 
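+
+        A minimal usage sketch (assuming `client` is an instance of this SDK's
+        async client; the IDs are illustrative):
+
+            file = await client.vector_stores.files.retrieve(
+                "file-abc123",
+                vector_store_id="vs_abc123",
+            )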
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + async def update( + self, + file_id: str, + *, + vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreFileObject: + """ + Update attributes on a vector store file. + + Args: + attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters, booleans, or numbers. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}/files/{file_id}", + body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreFileObject, + ) + + async def list( + self, + vector_store_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ListVectorStoreFilesResponse: + """ + Returns a list of vector store files. + + Args: + after: A cursor for use in pagination. `after` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}/files", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "filter": filter, + "limit": limit, + "order": order, + }, + file_list_params.FileListParams, + ), + ), + cast_to=ListVectorStoreFilesResponse, + ) + + async def delete( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileDeleteResponse: + """Delete a vector store file. + + This will remove the file from the vector store but + the file itself will not be deleted. To delete the file, use the + [delete file](/docs/api-reference/files/delete) endpoint. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._delete( + f"/vector_stores/{vector_store_id}/files/{file_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileDeleteResponse, + ) + + async def retrieve_content( + self, + file_id: str, + *, + vector_store_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FileRetrieveContentResponse: + """ + Retrieve the parsed contents of a vector store file. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + if not file_id: + raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}/files/{file_id}/content", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FileRetrieveContentResponse, + ) + + +class FilesResourceWithRawResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.create = to_raw_response_wrapper( + files.create, + ) + self.retrieve = to_raw_response_wrapper( + files.retrieve, + ) + self.update = to_raw_response_wrapper( + files.update, + ) + self.list = to_raw_response_wrapper( + files.list, + ) + self.delete = to_raw_response_wrapper( + files.delete, + ) + self.retrieve_content = to_raw_response_wrapper( + files.retrieve_content, + ) + + +class AsyncFilesResourceWithRawResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.create = async_to_raw_response_wrapper( + files.create, + ) + self.retrieve = async_to_raw_response_wrapper( + files.retrieve, + ) + self.update = async_to_raw_response_wrapper( + files.update, + ) + self.list = async_to_raw_response_wrapper( + files.list, + ) + self.delete = async_to_raw_response_wrapper( + files.delete, + ) + self.retrieve_content = async_to_raw_response_wrapper( + files.retrieve_content, + ) + + +class FilesResourceWithStreamingResponse: + def __init__(self, files: FilesResource) -> None: + self._files = files + + self.create = to_streamed_response_wrapper( + files.create, + ) + self.retrieve = to_streamed_response_wrapper( + files.retrieve, + ) + self.update = to_streamed_response_wrapper( + files.update, + ) + self.list = to_streamed_response_wrapper( + files.list, + ) + self.delete = to_streamed_response_wrapper( + files.delete, + ) + self.retrieve_content = to_streamed_response_wrapper( + files.retrieve_content, + ) + + +class AsyncFilesResourceWithStreamingResponse: + def __init__(self, files: AsyncFilesResource) -> None: + self._files = files + + self.create = async_to_streamed_response_wrapper( + files.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + files.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + files.update, + ) + self.list = async_to_streamed_response_wrapper( + files.list, + ) + self.delete = async_to_streamed_response_wrapper( + files.delete, + ) + self.retrieve_content = async_to_streamed_response_wrapper( + files.retrieve_content, + ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py new file mode 100644 index 00000000..8ad572ea --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py @@ -0,0 +1,847 @@ +# File generated from our OpenAPI spec 
by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +import httpx + +from .files import ( + FilesResource, + AsyncFilesResource, + FilesResourceWithRawResponse, + AsyncFilesResourceWithRawResponse, + FilesResourceWithStreamingResponse, + AsyncFilesResourceWithStreamingResponse, +) +from ...types import ( + vector_store_list_params, + vector_store_create_params, + vector_store_search_params, + vector_store_update_params, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .file_batches import ( + FileBatchesResource, + AsyncFileBatchesResource, + FileBatchesResourceWithRawResponse, + AsyncFileBatchesResourceWithRawResponse, + FileBatchesResourceWithStreamingResponse, + AsyncFileBatchesResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from ...types.vector_store_object import VectorStoreObject +from ...types.vector_store_list_response import VectorStoreListResponse +from ...types.vector_store_delete_response import VectorStoreDeleteResponse +from ...types.vector_store_search_response import VectorStoreSearchResponse +from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam + +__all__ = ["VectorStoresResource", "AsyncVectorStoresResource"] + + +class VectorStoresResource(SyncAPIResource): + @cached_property + def file_batches(self) -> FileBatchesResource: + return FileBatchesResource(self._client) + + @cached_property + def files(self) -> FilesResource: + return FilesResource(self._client) + + @cached_property + def with_raw_response(self) -> VectorStoresResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return VectorStoresResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return VectorStoresResourceWithStreamingResponse(self) + + def create( + self, + *, + chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Create a vector store. 
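+
+        A minimal usage sketch (assuming `client` is an instance of this SDK's
+        sync client; the name and file ID are illustrative):
+
+            vector_store = client.vector_stores.create(
+                name="Support FAQ",
+                file_ids=["file-abc123"],
+            )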
+ + Args: + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. Only applicable if `file_ids` is non-empty. + + expires_after: The expiration policy for a vector store. + + file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should + use. Useful for tools like `file_search` that can access files. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the vector store. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/vector_stores", + body=maybe_transform( + { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + }, + vector_store_create_params.VectorStoreCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + def retrieve( + self, + vector_store_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Retrieves a vector store. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._get( + f"/vector_stores/{vector_store_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + def update( + self, + vector_store_id: str, + *, + expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Modifies a vector store. + + Args: + expires_after: The expiration policy for a vector store. + + metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the vector store. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}", + body=maybe_transform( + { + "expires_after": expires_after, + "metadata": metadata, + "name": name, + }, + vector_store_update_params.VectorStoreUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreListResponse: + """Returns a list of vector stores. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/vector_stores", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + vector_store_list_params.VectorStoreListParams, + ), + ), + cast_to=VectorStoreListResponse, + ) + + def delete( + self, + vector_store_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreDeleteResponse: + """ + Delete a vector store. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._delete( + f"/vector_stores/{vector_store_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreDeleteResponse, + ) + + def search( + self, + vector_store_id: str, + *, + query: Union[str, List[str]], + filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, + max_num_results: int | NotGiven = NOT_GIVEN, + ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, + rewrite_query: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreSearchResponse: + """ + Search a vector store for relevant chunks based on a query and file attributes + filter. + + Args: + query: A query string for a search + + filters: A filter to apply based on file attributes. + + max_num_results: The maximum number of results to return. This number should be between 1 and 50 + inclusive. + + ranking_options: Ranking options for search. + + rewrite_query: Whether to rewrite the natural language query for vector search. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return self._post( + f"/vector_stores/{vector_store_id}/search", + body=maybe_transform( + { + "query": query, + "filters": filters, + "max_num_results": max_num_results, + "ranking_options": ranking_options, + "rewrite_query": rewrite_query, + }, + vector_store_search_params.VectorStoreSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreSearchResponse, + ) + + +class AsyncVectorStoresResource(AsyncAPIResource): + @cached_property + def file_batches(self) -> AsyncFileBatchesResource: + return AsyncFileBatchesResource(self._client) + + @cached_property + def files(self) -> AsyncFilesResource: + return AsyncFilesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
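+
+        A rough usage sketch (assuming `client` is an instance of this SDK's
+        async client; the ID is illustrative):
+
+            response = await client.vector_stores.with_raw_response.retrieve("vs_abc123")
+            # the raw response exposes HTTP metadata such as headers
+            print(response.headers.get("content-type"))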
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncVectorStoresResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncVectorStoresResourceWithStreamingResponse(self) + + async def create( + self, + *, + chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, + file_ids: List[str] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Create a vector store. + + Args: + chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` + strategy. Only applicable if `file_ids` is non-empty. + + expires_after: The expiration policy for a vector store. + + file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should + use. Useful for tools like `file_search` that can access files. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the vector store. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/vector_stores", + body=await async_maybe_transform( + { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + }, + vector_store_create_params.VectorStoreCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + async def retrieve( + self, + vector_store_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Retrieves a vector store. 
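+
+        A minimal usage sketch (assuming `client` is an instance of this SDK's
+        async client; the ID is illustrative):
+
+            vector_store = await client.vector_stores.retrieve("vs_abc123")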
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._get( + f"/vector_stores/{vector_store_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + async def update( + self, + vector_store_id: str, + *, + expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + name: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreObject: + """ + Modifies a vector store. + + Args: + expires_after: The expiration policy for a vector store. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + name: The name of the vector store. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}", + body=await async_maybe_transform( + { + "expires_after": expires_after, + "metadata": metadata, + "name": name, + }, + vector_store_update_params.VectorStoreUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreObject, + ) + + async def list( + self, + *, + after: str | NotGiven = NOT_GIVEN, + before: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreListResponse: + """Returns a list of vector stores. + + Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + before: A cursor for use in pagination. `before` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/vector_stores", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "after": after, + "before": before, + "limit": limit, + "order": order, + }, + vector_store_list_params.VectorStoreListParams, + ), + ), + cast_to=VectorStoreListResponse, + ) + + async def delete( + self, + vector_store_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreDeleteResponse: + """ + Delete a vector store. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._delete( + f"/vector_stores/{vector_store_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreDeleteResponse, + ) + + async def search( + self, + vector_store_id: str, + *, + query: Union[str, List[str]], + filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, + max_num_results: int | NotGiven = NOT_GIVEN, + ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, + rewrite_query: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VectorStoreSearchResponse: + """ + Search a vector store for relevant chunks based on a query and file attributes + filter. + + Args: + query: A query string for a search + + filters: A filter to apply based on file attributes. 
+ + max_num_results: The maximum number of results to return. This number should be between 1 and 50 + inclusive. + + ranking_options: Ranking options for search. + + rewrite_query: Whether to rewrite the natural language query for vector search. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not vector_store_id: + raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") + return await self._post( + f"/vector_stores/{vector_store_id}/search", + body=await async_maybe_transform( + { + "query": query, + "filters": filters, + "max_num_results": max_num_results, + "ranking_options": ranking_options, + "rewrite_query": rewrite_query, + }, + vector_store_search_params.VectorStoreSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VectorStoreSearchResponse, + ) + + +class VectorStoresResourceWithRawResponse: + def __init__(self, vector_stores: VectorStoresResource) -> None: + self._vector_stores = vector_stores + + self.create = to_raw_response_wrapper( + vector_stores.create, + ) + self.retrieve = to_raw_response_wrapper( + vector_stores.retrieve, + ) + self.update = to_raw_response_wrapper( + vector_stores.update, + ) + self.list = to_raw_response_wrapper( + vector_stores.list, + ) + self.delete = to_raw_response_wrapper( + vector_stores.delete, + ) + self.search = to_raw_response_wrapper( + vector_stores.search, + ) + + @cached_property + def file_batches(self) -> FileBatchesResourceWithRawResponse: + return FileBatchesResourceWithRawResponse(self._vector_stores.file_batches) + + @cached_property + def files(self) -> FilesResourceWithRawResponse: + return FilesResourceWithRawResponse(self._vector_stores.files) + + +class AsyncVectorStoresResourceWithRawResponse: + def __init__(self, vector_stores: AsyncVectorStoresResource) -> None: + self._vector_stores = vector_stores + + self.create = async_to_raw_response_wrapper( + vector_stores.create, + ) + self.retrieve = async_to_raw_response_wrapper( + vector_stores.retrieve, + ) + self.update = async_to_raw_response_wrapper( + vector_stores.update, + ) + self.list = async_to_raw_response_wrapper( + vector_stores.list, + ) + self.delete = async_to_raw_response_wrapper( + vector_stores.delete, + ) + self.search = async_to_raw_response_wrapper( + vector_stores.search, + ) + + @cached_property + def file_batches(self) -> AsyncFileBatchesResourceWithRawResponse: + return AsyncFileBatchesResourceWithRawResponse(self._vector_stores.file_batches) + + @cached_property + def files(self) -> AsyncFilesResourceWithRawResponse: + return AsyncFilesResourceWithRawResponse(self._vector_stores.files) + + +class VectorStoresResourceWithStreamingResponse: + def __init__(self, vector_stores: VectorStoresResource) -> None: + self._vector_stores = vector_stores + + self.create = to_streamed_response_wrapper( + vector_stores.create, + ) + self.retrieve = to_streamed_response_wrapper( + vector_stores.retrieve, + ) + self.update = to_streamed_response_wrapper( + vector_stores.update, + ) + self.list = to_streamed_response_wrapper( + vector_stores.list, + ) + self.delete = to_streamed_response_wrapper( + vector_stores.delete, + ) + self.search = to_streamed_response_wrapper( + vector_stores.search, + ) + + 
@cached_property + def file_batches(self) -> FileBatchesResourceWithStreamingResponse: + return FileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches) + + @cached_property + def files(self) -> FilesResourceWithStreamingResponse: + return FilesResourceWithStreamingResponse(self._vector_stores.files) + + +class AsyncVectorStoresResourceWithStreamingResponse: + def __init__(self, vector_stores: AsyncVectorStoresResource) -> None: + self._vector_stores = vector_stores + + self.create = async_to_streamed_response_wrapper( + vector_stores.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + vector_stores.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + vector_stores.update, + ) + self.list = async_to_streamed_response_wrapper( + vector_stores.list, + ) + self.delete = async_to_streamed_response_wrapper( + vector_stores.delete, + ) + self.search = async_to_streamed_response_wrapper( + vector_stores.search, + ) + + @cached_property + def file_batches(self) -> AsyncFileBatchesResourceWithStreamingResponse: + return AsyncFileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches) + + @cached_property + def files(self) -> AsyncFilesResourceWithStreamingResponse: + return AsyncFilesResourceWithStreamingResponse(self._vector_stores.files) diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py new file mode 100644 index 00000000..49c8d424 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -0,0 +1,136 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .batch import Batch as Batch +from .model import Model as Model +from .upload import Upload as Upload +from .response import Response as Response +from .includable import Includable as Includable +from .openai_file import OpenAIFile as OpenAIFile +from .input_content import InputContent as InputContent +from .input_message import InputMessage as InputMessage +from .thread_object import ThreadObject as ThreadObject +from .output_message import OutputMessage as OutputMessage +from .reasoning_item import ReasoningItem as ReasoningItem +from .usage_response import UsageResponse as UsageResponse +from .compound_filter import CompoundFilter as CompoundFilter +from .function_object import FunctionObject as FunctionObject +from .images_response import ImagesResponse as ImagesResponse +from .assistant_object import AssistantObject as AssistantObject +from .file_list_params import FileListParams as FileListParams +from .reasoning_effort import ReasoningEffort as ReasoningEffort +from .voice_ids_shared import VoiceIDsShared as VoiceIDsShared +from .batch_list_params import BatchListParams as BatchListParams +from .comparison_filter import ComparisonFilter as ComparisonFilter +from .computer_tool_call import ComputerToolCall as ComputerToolCall +from .file_list_response import FileListResponse as FileListResponse +from .file_search_ranker import FileSearchRanker as FileSearchRanker +from .file_upload_params import FileUploadParams as FileUploadParams +from .function_tool_call import FunctionToolCall as FunctionToolCall +from .batch_create_params import BatchCreateParams as BatchCreateParams +from .batch_list_response import BatchListResponse as BatchListResponse +from .input_content_param import InputContentParam as InputContentParam +from .input_message_param import InputMessageParam as InputMessageParam +from .model_list_response import 
ModelListResponse as ModelListResponse +from .response_properties import ResponseProperties as ResponseProperties +from .vector_store_object import VectorStoreObject as VectorStoreObject +from .assistant_tools_code import AssistantToolsCode as AssistantToolsCode +from .audit_log_actor_user import AuditLogActorUser as AuditLogActorUser +from .audit_log_event_type import AuditLogEventType as AuditLogEventType +from .file_delete_response import FileDeleteResponse as FileDeleteResponse +from .output_message_param import OutputMessageParam as OutputMessageParam +from .reasoning_item_param import ReasoningItemParam as ReasoningItemParam +from .thread_create_params import ThreadCreateParams as ThreadCreateParams +from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams +from .upload_create_params import UploadCreateParams as UploadCreateParams +from .web_search_tool_call import WebSearchToolCall as WebSearchToolCall +from .assistant_list_params import AssistantListParams as AssistantListParams +from .compound_filter_param import CompoundFilterParam as CompoundFilterParam +from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall +from .function_object_param import FunctionObjectParam as FunctionObjectParam +from .model_delete_response import ModelDeleteResponse as ModelDeleteResponse +from .transcription_segment import TranscriptionSegment as TranscriptionSegment +from .response_create_params import ResponseCreateParams as ResponseCreateParams +from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse +from .upload_add_part_params import UploadAddPartParams as UploadAddPartParams +from .upload_complete_params import UploadCompleteParams as UploadCompleteParams +from .voice_ids_shared_param import VoiceIDsSharedParam as VoiceIDsSharedParam +from .assistant_create_params import AssistantCreateParams as AssistantCreateParams +from .assistant_list_response import AssistantListResponse as AssistantListResponse +from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams +from .comparison_filter_param import ComparisonFilterParam as ComparisonFilterParam +from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .assistant_tools_function import AssistantToolsFunction as AssistantToolsFunction +from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .computer_tool_call_param import ComputerToolCallParam as ComputerToolCallParam +from .function_tool_call_param import FunctionToolCallParam as FunctionToolCallParam +from .image_create_edit_params import ImageCreateEditParams as ImageCreateEditParams +from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams +from .static_chunking_strategy import StaticChunkingStrategy as StaticChunkingStrategy +from .stop_configuration_param import StopConfigurationParam as StopConfigurationParam +from .upload_add_part_response import UploadAddPartResponse as UploadAddPartResponse +from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams +from .assistant_delete_response import AssistantDeleteResponse as AssistantDeleteResponse +from .computer_tool_call_output import ComputerToolCallOutput as ComputerToolCallOutput +from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .function_tool_call_output import FunctionToolCallOutput as FunctionToolCallOutput +from .model_response_properties import ModelResponseProperties as 
ModelResponseProperties +from .assistant_supported_models import AssistantSupportedModels as AssistantSupportedModels +from .assistant_tools_code_param import AssistantToolsCodeParam as AssistantToolsCodeParam +from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse +from .moderation_classify_params import ModerationClassifyParams as ModerationClassifyParams +from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams +from .vector_store_list_response import VectorStoreListResponse as VectorStoreListResponse +from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams +from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams +from .web_search_tool_call_param import WebSearchToolCallParam as WebSearchToolCallParam +from .assistant_tools_file_search import AssistantToolsFileSearch as AssistantToolsFileSearch +from .create_thread_request_param import CreateThreadRequestParam as CreateThreadRequestParam +from .file_search_tool_call_param import FileSearchToolCallParam as FileSearchToolCallParam +from .audio_generate_speech_params import AudioGenerateSpeechParams as AudioGenerateSpeechParams +from .audio_translate_audio_params import AudioTranslateAudioParams as AudioTranslateAudioParams +from .moderation_classify_response import ModerationClassifyResponse as ModerationClassifyResponse +from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse +from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse +from .audio_transcribe_audio_params import AudioTranscribeAudioParams as AudioTranscribeAudioParams +from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams +from .organization_get_costs_params import OrganizationGetCostsParams as OrganizationGetCostsParams +from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter +from .assistant_tools_function_param import AssistantToolsFunctionParam as AssistantToolsFunctionParam +from .audio_translate_audio_response import AudioTranslateAudioResponse as AudioTranslateAudioResponse +from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse +from .image_create_generation_params import ImageCreateGenerationParams as ImageCreateGenerationParams +from .realtime_create_session_params import RealtimeCreateSessionParams as RealtimeCreateSessionParams +from .static_chunking_strategy_param import StaticChunkingStrategyParam as StaticChunkingStrategyParam +from .audio_transcribe_audio_response import AudioTranscribeAudioResponse as AudioTranscribeAudioResponse +from .computer_tool_call_output_param import ComputerToolCallOutputParam as ComputerToolCallOutputParam +from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck as ComputerToolCallSafetyCheck +from .function_tool_call_output_param import FunctionToolCallOutputParam as FunctionToolCallOutputParam +from .realtime_create_session_response import RealtimeCreateSessionResponse as RealtimeCreateSessionResponse +from .response_list_input_items_params import ResponseListInputItemsParams as ResponseListInputItemsParams +from .assistant_tools_file_search_param import AssistantToolsFileSearchParam as AssistantToolsFileSearchParam +from .response_list_input_items_response import ResponseListInputItemsResponse as ResponseListInputItemsResponse +from 
.organization_list_audit_logs_params import OrganizationListAuditLogsParams as OrganizationListAuditLogsParams +from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam +from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam as AutoChunkingStrategyRequestParam +from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam +from .assistants_api_response_format_option import ( + AssistantsAPIResponseFormatOption as AssistantsAPIResponseFormatOption, +) +from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam as ComputerToolCallSafetyCheckParam +from .organization_list_audit_logs_response import ( + OrganizationListAuditLogsResponse as OrganizationListAuditLogsResponse, +) +from .static_chunking_strategy_request_param import ( + StaticChunkingStrategyRequestParam as StaticChunkingStrategyRequestParam, +) +from .assistants_api_response_format_option_param import ( + AssistantsAPIResponseFormatOptionParam as AssistantsAPIResponseFormatOptionParam, +) +from .realtime_create_transcription_session_params import ( + RealtimeCreateTranscriptionSessionParams as RealtimeCreateTranscriptionSessionParams, +) +from .realtime_create_transcription_session_response import ( + RealtimeCreateTranscriptionSessionResponse as RealtimeCreateTranscriptionSessionResponse, +) diff --git a/src/digitalocean_genai_sdk/types/assistant_create_params.py b/src/digitalocean_genai_sdk/types/assistant_create_params.py new file mode 100644 index 00000000..b89e4742 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_create_params.py @@ -0,0 +1,211 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .reasoning_effort import ReasoningEffort +from .assistant_supported_models import AssistantSupportedModels +from .assistant_tools_code_param import AssistantToolsCodeParam +from .assistant_tools_function_param import AssistantToolsFunctionParam +from .assistant_tools_file_search_param import AssistantToolsFileSearchParam +from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam + +__all__ = [ + "AssistantCreateParams", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", + "Tool", +] + + +class AssistantCreateParams(TypedDict, total=False): + model: Required[Union[str, AssistantSupportedModels]] + """ID of the model to use. + + You can use the [List models](/docs/api-reference/models/list) API to see all of + your available models, or see our [Model overview](/docs/models) for + descriptions of them. + """ + + description: Optional[str] + """The description of the assistant. The maximum length is 512 characters.""" + + instructions: Optional[str] + """The system instructions that the assistant uses. + + The maximum length is 256,000 characters. 
+    """
+
+    metadata: Optional[Dict[str, str]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: Optional[str]
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: Optional[AssistantsAPIResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](/docs/models#gpt-4o),
+    [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
+    since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    tools: Iterable[Tool]
+    """A list of tools enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    A list of [file](/docs/api-reference/files) IDs made available to the
+    `code_interpreter` tool. There can be a maximum of 20 files associated with the
+    tool.
+ """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, + ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, +] + + +class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + + file_ids: List[str] + """A list of [file](/docs/api-reference/files) IDs to add to the vector store. + + There can be a maximum of 10000 files in a vector store. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The [vector store](/docs/api-reference/vector-stores/object) attached to this + assistant. There can be a maximum of 1 vector store attached to the assistant. + """ + + vector_stores: Iterable[ToolResourcesFileSearchVectorStore] + """ + A helper to create a [vector store](/docs/api-reference/vector-stores/object) + with file_ids and attach it to this assistant. There can be a maximum of 1 + vector store attached to the assistant. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch + + +Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/assistant_delete_response.py b/src/digitalocean_genai_sdk/types/assistant_delete_response.py new file mode 100644 index 00000000..04207049 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
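+# A minimal illustrative sketch (not part of the generated spec types): a
+# `tool_resources` value matching the chunking-strategy TypedDicts defined in
+# assistant_create_params.py above. `file-abc123` is a hypothetical file ID;
+# the static values respect the documented bounds (overlap must not exceed
+# half of `max_chunk_size_tokens`).
+_EXAMPLE_TOOL_RESOURCES = {
+    "file_search": {
+        "vector_stores": [
+            {
+                "file_ids": ["file-abc123"],
+                "chunking_strategy": {
+                    "type": "static",
+                    "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
+                },
+            }
+        ]
+    }
+}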
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["AssistantDeleteResponse"]
+
+
+class AssistantDeleteResponse(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["assistant.deleted"]
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_params.py b/src/digitalocean_genai_sdk/types/assistant_list_params.py
new file mode 100644
index 00000000..834ffbca
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/assistant_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["AssistantListParams"]
+
+
+class AssistantListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/src/digitalocean_genai_sdk/types/assistant_list_response.py b/src/digitalocean_genai_sdk/types/assistant_list_response.py
new file mode 100644
index 00000000..dfc90bfa
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/assistant_list_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .assistant_object import AssistantObject
+
+__all__ = ["AssistantListResponse"]
+
+
+class AssistantListResponse(BaseModel):
+    data: List[AssistantObject]
+
+    first_id: str
+
+    has_more: bool
+
+    last_id: str
+
+    object: str
diff --git a/src/digitalocean_genai_sdk/types/assistant_object.py b/src/digitalocean_genai_sdk/types/assistant_object.py
new file mode 100644
index 00000000..4aa71ab9
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/assistant_object.py
@@ -0,0 +1,133 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .._models import BaseModel
+from .assistant_tools_code import AssistantToolsCode
+from .assistant_tools_function import AssistantToolsFunction
+from .assistant_tools_file_search import AssistantToolsFileSearch
+from .assistants_api_response_format_option import AssistantsAPIResponseFormatOption
+
+__all__ = ["AssistantObject", "Tool", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
+
+Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction]
+
+
+class ToolResourcesCodeInterpreter(BaseModel):
+    file_ids: Optional[List[str]] = None
+    """
+    A list of [file](/docs/api-reference/files) IDs made available to the
+    `code_interpreter` tool. There can be a maximum of 20 files associated with the
+    tool.
+    """
+
+
+class ToolResourcesFileSearch(BaseModel):
+    vector_store_ids: Optional[List[str]] = None
+    """
+    The ID of the [vector store](/docs/api-reference/vector-stores/object) attached
+    to this assistant. There can be a maximum of 1 vector store attached to the
+    assistant.
+    """
+
+
+class ToolResources(BaseModel):
+    code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+
+    file_search: Optional[ToolResourcesFileSearch] = None
+
+
+class AssistantObject(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the assistant was created."""
+
+    description: Optional[str] = None
+    """The description of the assistant. The maximum length is 512 characters."""
+
+    instructions: Optional[str] = None
+    """The system instructions that the assistant uses.
+
+    The maximum length is 256,000 characters.
+    """
+
+    metadata: Optional[Dict[str, str]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    model: str
+    """ID of the model to use.
+
+    You can use the [List models](/docs/api-reference/models/list) API to see all of
+    your available models, or see our [Model overview](/docs/models) for
+    descriptions of them.
+    """
+
+    name: Optional[str] = None
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    object: Literal["assistant"]
+    """The object type, which is always `assistant`."""
+
+    tools: List[Tool]
+    """A list of tools enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    response_format: Optional[AssistantsAPIResponseFormatOption] = None
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](/docs/models#gpt-4o),
+    [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
+    since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float] = None
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources] = None
+    """A set of resources that are used by the assistant's tools.
+ + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + top_p: Optional[float] = None + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + """ diff --git a/src/digitalocean_genai_sdk/types/assistant_supported_models.py b/src/digitalocean_genai_sdk/types/assistant_supported_models.py new file mode 100644 index 00000000..999b7f23 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_supported_models.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["AssistantSupportedModels"] + +AssistantSupportedModels: TypeAlias = Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", +] diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code.py b/src/digitalocean_genai_sdk/types/assistant_tools_code.py new file mode 100644 index 00000000..73a40a71 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_tools_code.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["AssistantToolsCode"] + + +class AssistantToolsCode(BaseModel): + type: Literal["code_interpreter"] + """The type of tool being defined: `code_interpreter`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py new file mode 100644 index 00000000..01420dda --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AssistantToolsCodeParam"] + + +class AssistantToolsCodeParam(TypedDict, total=False): + type: Required[Literal["code_interpreter"]] + """The type of tool being defined: `code_interpreter`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py new file mode 100644 index 00000000..3c834718 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
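+# A minimal illustrative sketch (assumption, not generated code): a `tools`
+# array mixing the three supported tool types; the function definition is
+# hypothetical. At most 128 tools may be attached to an assistant.
+_EXAMPLE_TOOLS = [
+    {"type": "code_interpreter"},
+    {"type": "file_search"},
+    {"type": "function", "function": {"name": "get_weather", "parameters": {"type": "object", "properties": {}}}},
+]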
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .file_search_ranker import FileSearchRanker
+
+__all__ = ["AssistantToolsFileSearch", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(BaseModel):
+    score_threshold: float
+    """The score threshold for the file search.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+    ranker: Optional[FileSearchRanker] = None
+    """The ranker to use for the file search.
+
+    If not specified, will use the `auto` ranker.
+    """
+
+
+class FileSearch(BaseModel):
+    max_num_results: Optional[int] = None
+    """The maximum number of results the file search tool should output.
+
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
+
+    Note that the file search tool may output fewer than `max_num_results` results.
+    See the
+    [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    ranking_options: Optional[FileSearchRankingOptions] = None
+    """The ranking options for the file search.
+
+    If not specified, the file search tool will use the `auto` ranker and a
+    score_threshold of 0.
+
+    See the
+    [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+
+class AssistantToolsFileSearch(BaseModel):
+    type: Literal["file_search"]
+    """The type of tool being defined: `file_search`"""
+
+    file_search: Optional[FileSearch] = None
+    """Overrides for the file search tool."""
diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
new file mode 100644
index 00000000..3f0e5af4
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .file_search_ranker import FileSearchRanker
+
+__all__ = ["AssistantToolsFileSearchParam", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(TypedDict, total=False):
+    score_threshold: Required[float]
+    """The score threshold for the file search.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+    ranker: FileSearchRanker
+    """The ranker to use for the file search.
+
+    If not specified, will use the `auto` ranker.
+    """
+
+
+class FileSearch(TypedDict, total=False):
+    max_num_results: int
+    """The maximum number of results the file search tool should output.
+
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
+
+    Note that the file search tool may output fewer than `max_num_results` results.
+    See the
+    [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+    """
+
+    ranking_options: FileSearchRankingOptions
+    """The ranking options for the file search.
+
+    If not specified, the file search tool will use the `auto` ranker and a
+    score_threshold of 0.
+
+    See the
+    [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings)
+    for more information.
+ """ + + +class AssistantToolsFileSearchParam(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + file_search: FileSearch + """Overrides for the file search tool.""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function.py b/src/digitalocean_genai_sdk/types/assistant_tools_function.py new file mode 100644 index 00000000..89326d54 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_tools_function.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel +from .function_object import FunctionObject + +__all__ = ["AssistantToolsFunction"] + + +class AssistantToolsFunction(BaseModel): + function: FunctionObject + + type: Literal["function"] + """The type of tool being defined: `function`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py new file mode 100644 index 00000000..4e9ecf3d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .function_object_param import FunctionObjectParam + +__all__ = ["AssistantToolsFunctionParam"] + + +class AssistantToolsFunctionParam(TypedDict, total=False): + function: Required[FunctionObjectParam] + + type: Required[Literal["function"]] + """The type of tool being defined: `function`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_update_params.py b/src/digitalocean_genai_sdk/types/assistant_update_params.py new file mode 100644 index 00000000..cf301dd4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistant_update_params.py @@ -0,0 +1,137 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import TypeAlias, TypedDict + +from .reasoning_effort import ReasoningEffort +from .assistant_supported_models import AssistantSupportedModels +from .assistant_tools_code_param import AssistantToolsCodeParam +from .assistant_tools_function_param import AssistantToolsFunctionParam +from .assistant_tools_file_search_param import AssistantToolsFileSearchParam +from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam + +__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"] + + +class AssistantUpdateParams(TypedDict, total=False): + description: Optional[str] + """The description of the assistant. The maximum length is 512 characters.""" + + instructions: Optional[str] + """The system instructions that the assistant uses. + + The maximum length is 256,000 characters. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: Union[str, AssistantSupportedModels] + """ID of the model to use. 
+
+    You can use the [List models](/docs/api-reference/models/list) API to see all of
+    your available models, or see our [Model overview](/docs/models) for
+    descriptions of them.
+    """
+
+    name: Optional[str]
+    """The name of the assistant. The maximum length is 256 characters."""
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    response_format: Optional[AssistantsAPIResponseFormatOptionParam]
+    """Specifies the format that the model must output.
+
+    Compatible with [GPT-4o](/docs/models#gpt-4o),
+    [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
+    since `gpt-3.5-turbo-1106`.
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_resources: Optional[ToolResources]
+    """A set of resources that are used by the assistant's tools.
+
+    The resources are specific to the type of tool. For example, the
+    `code_interpreter` tool requires a list of file IDs, while the `file_search`
+    tool requires a list of vector store IDs.
+    """
+
+    tools: Iterable[Tool]
+    """A list of tools enabled on the assistant.
+
+    There can be a maximum of 128 tools per assistant. Tools can be of types
+    `code_interpreter`, `file_search`, or `function`.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+
+class ToolResourcesCodeInterpreter(TypedDict, total=False):
+    file_ids: List[str]
+    """
+    Overrides the list of [file](/docs/api-reference/files) IDs made available to
+    the `code_interpreter` tool. There can be a maximum of 20 files associated with
+    the tool.
+    """
+
+
+class ToolResourcesFileSearch(TypedDict, total=False):
+    vector_store_ids: List[str]
+    """
+    Overrides the [vector store](/docs/api-reference/vector-stores/object) attached
+    to this assistant. There can be a maximum of 1 vector store attached to the
+    assistant.
+ """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch + + +Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py new file mode 100644 index 00000000..07c4f71e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .chat.response_format_text import ResponseFormatText +from .chat.response_format_json_object import ResponseFormatJsonObject +from .chat.response_format_json_schema import ResponseFormatJsonSchema + +__all__ = ["AssistantsAPIResponseFormatOption"] + +AssistantsAPIResponseFormatOption: TypeAlias = Union[ + Literal["auto"], ResponseFormatText, ResponseFormatJsonObject, ResponseFormatJsonSchema +] diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py new file mode 100644 index 00000000..7dbf967f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .chat.response_format_text_param import ResponseFormatTextParam +from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam +from .chat.response_format_json_schema_param import ResponseFormatJsonSchemaParam + +__all__ = ["AssistantsAPIResponseFormatOptionParam"] + +AssistantsAPIResponseFormatOptionParam: TypeAlias = Union[ + Literal["auto"], ResponseFormatTextParam, ResponseFormatJsonObjectParam, ResponseFormatJsonSchemaParam +] diff --git a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py b/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py new file mode 100644 index 00000000..8857594a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypedDict + +from .voice_ids_shared_param import VoiceIDsSharedParam + +__all__ = ["AudioGenerateSpeechParams"] + + +class AudioGenerateSpeechParams(TypedDict, total=False): + input: Required[str] + """The text to generate audio for. The maximum length is 4096 characters.""" + + model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]]] + """ + One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or + `gpt-4o-mini-tts`. + """ + + voice: Required[VoiceIDsSharedParam] + """The voice to use when generating the audio. + + Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, + `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in + the [Text to speech guide](/docs/guides/text-to-speech#voice-options). + """ + + instructions: str + """Control the voice of your generated audio with additional instructions. 
+
+    Does not work with `tts-1` or `tts-1-hd`.
+    """
+
+    response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
+    """The format to return audio in.
+
+    Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
+    """
+
+    speed: float
+    """The speed of the generated audio.
+
+    Select a value from `0.25` to `4.0`. `1.0` is the default.
+    """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
new file mode 100644
index 00000000..cbc15157
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py
@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+
+__all__ = ["AudioTranscribeAudioParams"]
+
+
+class AudioTranscribeAudioParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """
+    The audio file object (not file name) to transcribe, in one of these formats:
+    flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+    """
+
+    model: Required[Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]]]
+    """ID of the model to use.
+
+    The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`
+    (which is powered by our open source Whisper V2 model).
+    """
+
+    include: List[Literal["logprobs"]]
+    """Additional information to include in the transcription response.
+
+    `logprobs` will return the log probabilities of the tokens in the response to
+    understand the model's confidence in the transcription. `logprobs` only works
+    with response_format set to `json` and only with the models `gpt-4o-transcribe`
+    and `gpt-4o-mini-transcribe`.
+    """
+
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    prompt: str
+    """An optional text to guide the model's style or continue a previous audio
+    segment.
+
+    The [prompt](/docs/guides/speech-to-text#prompting) should match the audio
+    language.
+    """
+
+    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
+    """
+    The format of the output, in one of these options: `json`, `text`, `srt`,
+    `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+    the only supported format is `json`.
+    """
+
+    stream: Optional[bool]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+    for more information.
+
+    Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+    """
+
+    temperature: float
+    """The sampling temperature, between 0 and 1.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. If set to 0, the model will use
+    [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+    automatically increase the temperature until certain thresholds are hit.
+    """
+
+    timestamp_granularities: List[Literal["word", "segment"]]
+    """The timestamp granularities to populate for this transcription.
+
+    `response_format` must be set to `verbose_json` to use timestamp granularities.
+    Either or both of these options are supported: `word` or `segment`. Note: There
+    is no additional latency for segment timestamps, but generating word timestamps
+    incurs additional latency.
+    """
diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
new file mode 100644
index 00000000..54b999ed
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py
@@ -0,0 +1,69 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import TypeAlias
+
+from .._models import BaseModel
+from .transcription_segment import TranscriptionSegment
+
+__all__ = [
+    "AudioTranscribeAudioResponse",
+    "CreateTranscriptionResponseJson",
+    "CreateTranscriptionResponseJsonLogprob",
+    "CreateTranscriptionResponseVerboseJson",
+    "CreateTranscriptionResponseVerboseJsonWord",
+]
+
+
+class CreateTranscriptionResponseJsonLogprob(BaseModel):
+    token: str
+    """The token that was used to generate the log probability."""
+
+    bytes: List[int]
+    """The bytes that were used to generate the log probability."""
+
+    logprob: float
+    """The log probability of the token."""
+
+
+class CreateTranscriptionResponseJson(BaseModel):
+    text: str
+    """The transcribed text."""
+
+    logprobs: Optional[List[CreateTranscriptionResponseJsonLogprob]] = None
+    """The log probabilities of the tokens in the transcription.
+
+    Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
+    if `logprobs` is added to the `include` array.
+    """
+
+
+class CreateTranscriptionResponseVerboseJsonWord(BaseModel):
+    end: float
+    """End time of the word in seconds."""
+
+    start: float
+    """Start time of the word in seconds."""
+
+    word: str
+    """The text content of the word."""
+
+
+class CreateTranscriptionResponseVerboseJson(BaseModel):
+    duration: float
+    """The duration of the input audio."""
+
+    language: str
+    """The language of the input audio."""
+
+    text: str
+    """The transcribed text."""
+
+    segments: Optional[List[TranscriptionSegment]] = None
+    """Segments of the transcribed text and their corresponding details."""
+
+    words: Optional[List[CreateTranscriptionResponseVerboseJsonWord]] = None
+    """Extracted words and their corresponding timestamps."""
+
+
+AudioTranscribeAudioResponse: TypeAlias = Union[CreateTranscriptionResponseJson, CreateTranscriptionResponseVerboseJson]
diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
new file mode 100644
index 00000000..cc222f14
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+
+__all__ = ["AudioTranslateAudioParams"]
+
+
+class AudioTranslateAudioParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """
+    The audio file object (not file name) to translate, in one of these formats:
+    flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ """ + + model: Required[Union[str, Literal["whisper-1"]]] + """ID of the model to use. + + Only `whisper-1` (which is powered by our open source Whisper V2 model) is + currently available. + """ + + prompt: str + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](/docs/guides/speech-to-text#prompting) should be in English. + """ + + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] + """ + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. + """ + + temperature: float + """The sampling temperature, between 0 and 1. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. If set to 0, the model will use + [log probability](https://en.wikipedia.org/wiki/Log_probability) to + automatically increase the temperature until certain thresholds are hit. + """ diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py new file mode 100644 index 00000000..74d08a73 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import TypeAlias + +from .._models import BaseModel +from .transcription_segment import TranscriptionSegment + +__all__ = ["AudioTranslateAudioResponse", "CreateTranslationResponseJson", "CreateTranslationResponseVerboseJson"] + + +class CreateTranslationResponseJson(BaseModel): + text: str + + +class CreateTranslationResponseVerboseJson(BaseModel): + duration: float + """The duration of the input audio.""" + + language: str + """The language of the output translation (always `english`).""" + + text: str + """The translated text.""" + + segments: Optional[List[TranscriptionSegment]] = None + """Segments of the translated text and their corresponding details.""" + + +AudioTranslateAudioResponse: TypeAlias = Union[CreateTranslationResponseJson, CreateTranslationResponseVerboseJson] diff --git a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py b/src/digitalocean_genai_sdk/types/audit_log_actor_user.py new file mode 100644 index 00000000..f3da325d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/audit_log_actor_user.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AuditLogActorUser"] + + +class AuditLogActorUser(BaseModel): + id: Optional[str] = None + """The user id.""" + + email: Optional[str] = None + """The user email.""" diff --git a/src/digitalocean_genai_sdk/types/audit_log_event_type.py b/src/digitalocean_genai_sdk/types/audit_log_event_type.py new file mode 100644 index 00000000..2031cbb8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/audit_log_event_type.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
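+# A minimal illustrative sketch (assumption, not generated code): an
+# AudioTranslateAudioParams payload from the preceding module. The caller
+# supplies `file` separately as a binary handle, e.g. open("speech.mp3", "rb"),
+# where the path is hypothetical.
+_EXAMPLE_TRANSLATE_PARAMS = {
+    "model": "whisper-1",
+    "response_format": "verbose_json",
+    "temperature": 0,
+}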
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["AuditLogEventType"] + +AuditLogEventType: TypeAlias = Literal[ + "api_key.created", + "api_key.updated", + "api_key.deleted", + "invite.sent", + "invite.accepted", + "invite.deleted", + "login.succeeded", + "login.failed", + "logout.succeeded", + "logout.failed", + "organization.updated", + "project.created", + "project.updated", + "project.archived", + "service_account.created", + "service_account.updated", + "service_account.deleted", + "rate_limit.updated", + "rate_limit.deleted", + "user.added", + "user.updated", + "user.deleted", +] diff --git a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py new file mode 100644 index 00000000..5c0c131e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoChunkingStrategyRequestParam"] + + +class AutoChunkingStrategyRequestParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" diff --git a/src/digitalocean_genai_sdk/types/batch.py b/src/digitalocean_genai_sdk/types/batch.py new file mode 100644 index 00000000..1fdd6928 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/batch.py @@ -0,0 +1,109 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["Batch", "Errors", "ErrorsData", "RequestCounts"] + + +class ErrorsData(BaseModel): + code: Optional[str] = None + """An error code identifying the error type.""" + + line: Optional[int] = None + """The line number of the input file where the error occurred, if applicable.""" + + message: Optional[str] = None + """A human-readable message providing more details about the error.""" + + param: Optional[str] = None + """The name of the parameter that caused the error, if applicable.""" + + +class Errors(BaseModel): + data: Optional[List[ErrorsData]] = None + + object: Optional[str] = None + """The object type, which is always `list`.""" + + +class RequestCounts(BaseModel): + completed: int + """Number of requests that have been completed successfully.""" + + failed: int + """Number of requests that have failed.""" + + total: int + """Total number of requests in the batch.""" + + +class Batch(BaseModel): + id: str + + completion_window: str + """The time frame within which the batch should be processed.""" + + created_at: int + """The Unix timestamp (in seconds) for when the batch was created.""" + + endpoint: str + """The OpenAI API endpoint used by the batch.""" + + input_file_id: str + """The ID of the input file for the batch.""" + + object: Literal["batch"] + """The object type, which is always `batch`.""" + + status: Literal[ + "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" + ] + """The current status of the batch.""" + + cancelled_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch was cancelled.""" + + cancelling_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch started cancelling.""" + + completed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the 
batch was completed.""" + + error_file_id: Optional[str] = None + """The ID of the file containing the outputs of requests with errors.""" + + errors: Optional[Errors] = None + + expired_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch expired.""" + + expires_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch will expire.""" + + failed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch failed.""" + + finalizing_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch started finalizing.""" + + in_progress_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the batch started processing.""" + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + output_file_id: Optional[str] = None + """The ID of the file containing the outputs of successfully executed requests.""" + + request_counts: Optional[RequestCounts] = None + """The request counts for different statuses within the batch.""" diff --git a/src/digitalocean_genai_sdk/types/batch_create_params.py b/src/digitalocean_genai_sdk/types/batch_create_params.py new file mode 100644 index 00000000..08243244 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/batch_create_params.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["BatchCreateParams"] + + +class BatchCreateParams(TypedDict, total=False): + completion_window: Required[Literal["24h"]] + """The time frame within which the batch should be processed. + + Currently only `24h` is supported. + """ + + endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] + """The endpoint to be used for all requests in the batch. + + Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and + `/v1/completions` are supported. Note that `/v1/embeddings` batches are also + restricted to a maximum of 50,000 embedding inputs across all requests in the + batch. + """ + + input_file_id: Required[str] + """The ID of an uploaded file that contains requests for the new batch. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your input file must be formatted as a + [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with + the purpose `batch`. The file can contain up to 50,000 requests, and can be up + to 200 MB in size. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/batch_list_params.py b/src/digitalocean_genai_sdk/types/batch_list_params.py new file mode 100644 index 00000000..ef5e966b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/batch_list_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BatchListParams"] + + +class BatchListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ diff --git a/src/digitalocean_genai_sdk/types/batch_list_response.py b/src/digitalocean_genai_sdk/types/batch_list_response.py new file mode 100644 index 00000000..87c4f9b8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/batch_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .batch import Batch +from .._models import BaseModel + +__all__ = ["BatchListResponse"] + + +class BatchListResponse(BaseModel): + data: List[Batch] + + has_more: bool + + object: Literal["list"] + + first_id: Optional[str] = None + + last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/chat/__init__.py b/src/digitalocean_genai_sdk/types/chat/__init__.py new file mode 100644 index 00000000..cfa8c56a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/__init__.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
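+# A minimal illustrative sketch (assumption, not generated code) of cursor
+# pagination with the preceding BatchListParams: pass the previous page's
+# `last_id` back as `after` to fetch the next page. The cursor value below is
+# hypothetical.
+#
+#     first_page = {"limit": 20}
+#     next_page = {"limit": 20, "after": "batch_abc123"}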
+ +from __future__ import annotations + +from .usage import Usage as Usage +from .token_logprob import TokenLogprob as TokenLogprob +from .create_response import CreateResponse as CreateResponse +from .response_message import ResponseMessage as ResponseMessage +from .message_tool_call import MessageToolCall as MessageToolCall +from .web_search_location import WebSearchLocation as WebSearchLocation +from .response_format_text import ResponseFormatText as ResponseFormatText +from .completion_list_params import CompletionListParams as CompletionListParams +from .model_ids_shared_param import ModelIDsSharedParam as ModelIDsSharedParam +from .message_tool_call_param import MessageToolCallParam as MessageToolCallParam +from .web_search_context_size import WebSearchContextSize as WebSearchContextSize +from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .completion_list_response import CompletionListResponse as CompletionListResponse +from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams +from .web_search_location_param import WebSearchLocationParam as WebSearchLocationParam +from .completion_delete_response import CompletionDeleteResponse as CompletionDeleteResponse +from .response_format_text_param import ResponseFormatTextParam as ResponseFormatTextParam +from .response_format_json_object import ResponseFormatJsonObject as ResponseFormatJsonObject +from .response_format_json_schema import ResponseFormatJsonSchema as ResponseFormatJsonSchema +from .completion_list_messages_params import CompletionListMessagesParams as CompletionListMessagesParams +from .completion_list_messages_response import CompletionListMessagesResponse as CompletionListMessagesResponse +from .response_format_json_object_param import ResponseFormatJsonObjectParam as ResponseFormatJsonObjectParam +from .response_format_json_schema_param import ResponseFormatJsonSchemaParam as ResponseFormatJsonSchemaParam +from .request_message_content_part_text_param import ( + RequestMessageContentPartTextParam as RequestMessageContentPartTextParam, +) diff --git a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py new file mode 100644 index 00000000..d11f9322 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py @@ -0,0 +1,662 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
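+# A minimal illustrative sketch (assumption, not generated code): the smallest
+# CompletionCreateParams payload defined below; the model name is one of the
+# examples cited in the field docs.
+#
+#     params = {
+#         "model": "gpt-4o",
+#         "messages": [
+#             {"role": "system", "content": "You are a helpful assistant."},
+#             {"role": "user", "content": "Say hello."},
+#         ],
+#     }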
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..reasoning_effort import ReasoningEffort +from ..function_object_param import FunctionObjectParam +from .model_ids_shared_param import ModelIDsSharedParam +from ..voice_ids_shared_param import VoiceIDsSharedParam +from .message_tool_call_param import MessageToolCallParam +from .web_search_context_size import WebSearchContextSize +from ..stop_configuration_param import StopConfigurationParam +from .web_search_location_param import WebSearchLocationParam +from .response_format_text_param import ResponseFormatTextParam +from .response_format_json_object_param import ResponseFormatJsonObjectParam +from .response_format_json_schema_param import ResponseFormatJsonSchemaParam +from ..chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from .request_message_content_part_text_param import RequestMessageContentPartTextParam + +__all__ = [ + "CompletionCreateParams", + "Message", + "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile", + "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageAudio", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal", + "MessageChatCompletionRequestAssistantMessageFunctionCall", + "MessageChatCompletionRequestToolMessage", + "MessageChatCompletionRequestFunctionMessage", + "Audio", + "FunctionCall", + "FunctionCallChatCompletionFunctionCallOption", + "Function", + "Prediction", + "ResponseFormat", + "ToolChoice", + "ToolChoiceChatCompletionNamedToolChoice", + "ToolChoiceChatCompletionNamedToolChoiceFunction", + "Tool", + "WebSearchOptions", + "WebSearchOptionsUserLocation", +] + + +class CompletionCreateParams(TypedDict, total=False): + messages: Required[Iterable[Message]] + """A list of messages comprising the conversation so far. + + Depending on the [model](/docs/models) you use, different message types + (modalities) are supported, like [text](/docs/guides/text-generation), + [images](/docs/guides/vision), and [audio](/docs/guides/audio). + """ + + model: Required[ModelIDsSharedParam] + """Model ID used to generate the response, like `gpt-4o` or `o1`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the [model guide](/docs/models) to + browse and compare available models. + """ + + audio: Optional[Audio] + """Parameters for audio output. 
+ + Required when audio output is requested with `modalities: ["audio"]`. + [Learn more](/docs/guides/audio). + """ + + frequency_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on their existing frequency in the + text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + + function_call: FunctionCall + """Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a + function. + + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + + `none` is the default when no functions are present. `auto` is the default if + functions are present. + """ + + functions: Iterable[Function] + """Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + """ + + logit_bias: Optional[Dict[str, int]] + """Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + """ + + logprobs: Optional[bool] + """Whether to return log probabilities of the output tokens or not. + + If true, returns the log probabilities of each output token returned in the + `content` of `message`. + """ + + max_completion_tokens: Optional[int] + """ + An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + """ + + max_tokens: Optional[int] + """ + The maximum number of [tokens](/tokenizer) that can be generated in the chat + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with [o1 series models](/docs/guides/reasoning). + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + modalities: Optional[List[Literal["text", "audio"]]] + """ + Output types that you would like the model to generate. Most models are capable + of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](/docs/guides/audio). To request that this model generate both + text and audio responses, you can use: + + `["text", "audio"]` + """ + + n: Optional[int] + """How many chat completion choices to generate for each input message. + + Note that you will be charged based on the number of generated tokens across all + of the choices. Keep `n` as `1` to minimize costs. 
+ """
+
+ parallel_tool_calls: bool
+ """
+ Whether to enable
+ [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling)
+ during tool use.
+ """
+
+ prediction: Optional[Prediction]
+ """
+ Static predicted output content, such as the content of a text file that is
+ being regenerated.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """**o-series models only**
+
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+ result in faster responses and fewer tokens used on reasoning in a response.
+ """
+
+ response_format: ResponseFormat
+ """An object specifying the format that the model must output.
+
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
+ in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
+
+ seed: Optional[int]
+ """
+ This feature is in Beta. If specified, our system will make a best effort to
+ sample deterministically, such that repeated requests with the same `seed` and
+ parameters should return the same result. Determinism is not guaranteed, and you
+ should refer to the `system_fingerprint` response parameter to monitor changes
+ in the backend.
+ """
+
+ service_tier: Optional[Literal["auto", "default"]]
+ """Specifies the latency tier to use for processing the request.
+
+ This parameter is relevant for customers subscribed to the scale tier service:
+
+ - If set to 'auto', and the Project is Scale tier enabled, the system will
+   utilize scale tier credits until they are exhausted.
+ - If set to 'auto', and the Project is not Scale tier enabled, the request will
+   be processed using the default service tier with a lower uptime SLA and no
+   latency guarantee.
+ - If set to 'default', the request will be processed using the default service
+   tier with a lower uptime SLA and no latency guarantee.
+ - When not set, the default behavior is 'auto'.
+
+ When this parameter is set, the response body will include the `service_tier`
+ utilized.
+ """
+
+ stop: Optional[StopConfigurationParam]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ store: Optional[bool]
+ """
+ Whether or not to store the output of this chat completion request for use in
+ our [model distillation](/docs/guides/distillation) or
+ [evals](/docs/guides/evals) products.
+ """
+
+ stream: Optional[bool]
+ """
+ If set to true, the model response data will be streamed to the client as it is
+ generated using
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
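+
+ A rough client-side sketch (assuming raw `httpx` access; the `url`, `headers`,
+ and `params` names are placeholders, not part of this module):
+
+     import json
+
+     import httpx
+
+     with httpx.stream("POST", url, headers=headers, json={**params, "stream": True}) as resp:
+         for line in resp.iter_lines():
+             if line.startswith("data: ") and line != "data: [DONE]":
+                 chunk = json.loads(line[len("data: "):])  # one streamed chunk
+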
+ See the [Streaming section below](/docs/api-reference/chat/streaming) for more
+ information, along with the
+ [streaming responses](/docs/guides/streaming-responses) guide for more
+ information on how to handle the streaming events.
+ """
+
+ stream_options: Optional[ChatCompletionStreamOptionsParam]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic. We generally recommend altering
+ this or `top_p` but not both.
+ """
+
+ tool_choice: ToolChoice
+ """
+ Controls which (if any) tool is called by the model. `none` means the model will
+ not call any tool and instead generates a message. `auto` means the model can
+ pick between generating a message or calling one or more tools. `required` means
+ the model must call one or more tools. Specifying a particular tool via
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ call that tool.
+
+ `none` is the default when no tools are present. `auto` is the default if tools
+ are present.
+ """
+
+ tools: Iterable[Tool]
+ """A list of tools the model may call.
+
+ Currently, only functions are supported as a tool. Use this to provide a list of
+ functions the model may generate JSON inputs for. A maximum of 128 functions is
+ supported.
+ """
+
+ top_logprobs: Optional[int]
+ """
+ An integer between 0 and 20 specifying the number of most likely tokens to
+ return at each token position, each with an associated log probability.
+ `logprobs` must be set to `true` if this parameter is used.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
+ """
+
+ web_search_options: WebSearchOptions
+ """
+ This tool searches the web for relevant results to use in a response. Learn more
+ about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).
+ """
+
+
+class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
+ """The contents of the developer message."""
+
+ role: Required[Literal["developer"]]
+ """The role of the messages author, in this case `developer`."""
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
+ """
+
+
+class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
+ """The contents of the system message."""
+
+ role: Required[Literal["system"]]
+ """The role of the messages author, in this case `system`."""
+
+ name: str
+ """An optional name for the participant.
+
+ Provides the model information to differentiate between participants of the same
+ role.
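+
+ For example, two participants sharing the `user` role can be told apart by
+ `name` (values illustrative):
+
+     {"role": "system", "content": "You are moderating a debate."}
+     {"role": "user", "name": "alice", "content": "Opening statement."}
+     {"role": "user", "name": "bob", "content": "Rebuttal."}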
+ """ + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL( + TypedDict, total=False +): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image. + + Learn more in the + [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). + """ + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage( + TypedDict, total=False +): + image_url: Required[ + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL + ] + + type: Required[Literal["image_url"]] + """The type of the content part.""" + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio( + TypedDict, total=False +): + data: Required[str] + """Base64 encoded audio data.""" + + format: Required[Literal["wav", "mp3"]] + """The format of the encoded audio data. Currently supports "wav" and "mp3".""" + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio( + TypedDict, total=False +): + input_audio: Required[ + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio + ] + + type: Required[Literal["input_audio"]] + """The type of the content part. Always `input_audio`.""" + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile( + TypedDict, total=False +): + file_data: str + """ + The base64 encoded file data, used when passing the file to the model as a + string. + """ + + file_id: str + """The ID of an uploaded file to use as input.""" + + filename: str + """The name of the file, used when passing the file to the model as a string.""" + + +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile( + TypedDict, total=False +): + file: Required[ + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile + ] + + type: Required[Literal["file"]] + """The type of the content part. Always `file`.""" + + +MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ + RequestMessageContentPartTextParam, + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage, + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio, + MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile, +] + + +class MessageChatCompletionRequestUserMessage(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] + """The contents of the user message.""" + + role: Required[Literal["user"]] + """The role of the messages author, in this case `user`.""" + + name: str + """An optional name for the participant. + + Provides the model information to differentiate between participants of the same + role. 
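+
+ A user message can also carry an array of content parts; a sketch mixing text
+ and an image (the URL is a placeholder):
+
+     {
+         "role": "user",
+         "name": "alice",
+         "content": [
+             {"type": "text", "text": "What is in this image?"},
+             {"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "low"}},
+         ],
+     }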
+ """ + + +class MessageChatCompletionRequestAssistantMessageAudio(TypedDict, total=False): + id: Required[str] + """Unique identifier for a previous audio response from the model.""" + + +class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal( + TypedDict, total=False +): + refusal: Required[str] + """The refusal message generated by the model.""" + + type: Required[Literal["refusal"]] + """The type of the content part.""" + + +MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ + RequestMessageContentPartTextParam, + MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal, +] + + +class MessageChatCompletionRequestAssistantMessageFunctionCall(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): + role: Required[Literal["assistant"]] + """The role of the messages author, in this case `assistant`.""" + + audio: Optional[MessageChatCompletionRequestAssistantMessageAudio] + """Data about a previous audio response from the model. + + [Learn more](/docs/guides/audio). + """ + + content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] + """The contents of the assistant message. + + Required unless `tool_calls` or `function_call` is specified. + """ + + function_call: Optional[MessageChatCompletionRequestAssistantMessageFunctionCall] + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the + model. + """ + + name: str + """An optional name for the participant. + + Provides the model information to differentiate between participants of the same + role. 
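+
+ When replaying a tool-calling turn, an assistant message carries `tool_calls`
+ rather than text content; a sketch (IDs and function names are illustrative):
+
+     {
+         "role": "assistant",
+         "content": None,
+         "tool_calls": [
+             {
+                 "id": "call_123",
+                 "type": "function",
+                 "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
+             }
+         ],
+     }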
+ """
+
+ refusal: Optional[str]
+ """The refusal message by the assistant."""
+
+ tool_calls: Iterable[MessageToolCallParam]
+ """The tool calls generated by the model, such as function calls."""
+
+
+class MessageChatCompletionRequestToolMessage(TypedDict, total=False):
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
+ """The contents of the tool message."""
+
+ role: Required[Literal["tool"]]
+ """The role of the messages author, in this case `tool`."""
+
+ tool_call_id: Required[str]
+ """Tool call that this message is responding to."""
+
+
+class MessageChatCompletionRequestFunctionMessage(TypedDict, total=False):
+ content: Required[Optional[str]]
+ """The contents of the function message."""
+
+ name: Required[str]
+ """The name of the function to call."""
+
+ role: Required[Literal["function"]]
+ """The role of the messages author, in this case `function`."""
+
+
+Message: TypeAlias = Union[
+ MessageChatCompletionRequestDeveloperMessage,
+ MessageChatCompletionRequestSystemMessage,
+ MessageChatCompletionRequestUserMessage,
+ MessageChatCompletionRequestAssistantMessage,
+ MessageChatCompletionRequestToolMessage,
+ MessageChatCompletionRequestFunctionMessage,
+]
+
+
+class Audio(TypedDict, total=False):
+ format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
+ """Specifies the output audio format.
+
+ Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
+ """
+
+ voice: Required[VoiceIDsSharedParam]
+ """The voice the model uses to respond.
+
+ Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and
+ `shimmer`.
+ """
+
+
+class FunctionCallChatCompletionFunctionCallOption(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to call."""
+
+
+FunctionCall: TypeAlias = Union[Literal["none", "auto"], FunctionCallChatCompletionFunctionCallOption]
+
+
+class Function(TypedDict, total=False):
+ name: Required[str]
+ """The name of the function to be called.
+
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+ of 64.
+ """
+
+ description: str
+ """
+ A description of what the function does, used by the model to choose when and
+ how to call the function.
+ """
+
+ parameters: Dict[str, object]
+ """The parameters the function accepts, described as a JSON Schema object.
+
+ See the [guide](/docs/guides/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ documentation about the format.
+
+ Omitting `parameters` defines a function with an empty parameter list.
+ """
+
+
+class Prediction(TypedDict, total=False):
+ content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]]
+ """
+ The content that should be matched when generating a model response. If
+ generated tokens would match this content, the entire model response can be
+ returned much more quickly.
+ """
+
+ type: Required[Literal["content"]]
+ """The type of the predicted content you want to provide.
+
+ This type is currently always `content`.
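+
+ For example, regenerating an edited file (the `original_file_text` variable is
+ a placeholder for your own content):
+
+     params["prediction"] = {"type": "content", "content": original_file_text}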
+ """ + + +ResponseFormat: TypeAlias = Union[ResponseFormatTextParam, ResponseFormatJsonSchemaParam, ResponseFormatJsonObjectParam] + + +class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): + function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] + + +class Tool(TypedDict, total=False): + function: Required[FunctionObjectParam] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +class WebSearchOptionsUserLocation(TypedDict, total=False): + approximate: Required[WebSearchLocationParam] + """Approximate location parameters for the search.""" + + type: Required[Literal["approximate"]] + """The type of location approximation. Always `approximate`.""" + + +class WebSearchOptions(TypedDict, total=False): + search_context_size: WebSearchContextSize + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[WebSearchOptionsUserLocation] + """Approximate location parameters for the search.""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py b/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py new file mode 100644 index 00000000..9e456e16 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["CompletionDeleteResponse"] + + +class CompletionDeleteResponse(BaseModel): + id: str + """The ID of the chat completion that was deleted.""" + + deleted: bool + """Whether the chat completion was deleted.""" + + object: Literal["chat.completion.deleted"] + """The type of object being deleted.""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py new file mode 100644 index 00000000..43f4a7cc --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["CompletionListMessagesParams"] + + +class CompletionListMessagesParams(TypedDict, total=False): + after: str + """Identifier for the last message from the previous pagination request.""" + + limit: int + """Number of messages to retrieve.""" + + order: Literal["asc", "desc"] + """Sort order for messages by timestamp. + + Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. 
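+
+ A pagination sketch (the exact resource path on the client is assumed here,
+ not verified against this SDK's layout):
+
+     page = client.chat.completions.messages.list(completion_id, limit=20, order="asc")
+     while page.has_more:
+         page = client.chat.completions.messages.list(completion_id, after=page.last_id, limit=20)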
+ """ diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py new file mode 100644 index 00000000..57087a63 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_message import ResponseMessage + +__all__ = ["CompletionListMessagesResponse", "Data"] + + +class Data(ResponseMessage): + id: str + """The identifier of the chat message.""" + + +class CompletionListMessagesResponse(BaseModel): + data: List[Data] + """An array of chat completion message objects.""" + + first_id: str + """The identifier of the first chat message in the data array.""" + + has_more: bool + """Indicates whether there are more chat messages available.""" + + last_id: str + """The identifier of the last chat message in the data array.""" + + object: Literal["list"] + """The type of this object. It is always set to "list".""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_params.py new file mode 100644 index 00000000..8f149e35 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_list_params.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, TypedDict + +__all__ = ["CompletionListParams"] + + +class CompletionListParams(TypedDict, total=False): + after: str + """Identifier for the last chat completion from the previous pagination request.""" + + limit: int + """Number of Chat Completions to retrieve.""" + + metadata: Optional[Dict[str, str]] + """A list of metadata keys to filter the Chat Completions by. Example: + + `metadata[key1]=value1&metadata[key2]=value2` + """ + + model: str + """The model used to generate the Chat Completions.""" + + order: Literal["asc", "desc"] + """Sort order for Chat Completions by timestamp. + + Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. + """ diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_response.py new file mode 100644 index 00000000..2899f598 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_list_response.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .create_response import CreateResponse + +__all__ = ["CompletionListResponse"] + + +class CompletionListResponse(BaseModel): + data: List[CreateResponse] + """An array of chat completion objects.""" + + first_id: str + """The identifier of the first chat completion in the data array.""" + + has_more: bool + """Indicates whether there are more Chat Completions available.""" + + last_id: str + """The identifier of the last chat completion in the data array.""" + + object: Literal["list"] + """The type of this object. 
It is always set to "list".""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py b/src/digitalocean_genai_sdk/types/chat/completion_update_params.py new file mode 100644 index 00000000..1f09ecaa --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/completion_update_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["CompletionUpdateParams"] + + +class CompletionUpdateParams(TypedDict, total=False): + metadata: Required[Optional[Dict[str, str]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/digitalocean_genai_sdk/types/chat/create_response.py b/src/digitalocean_genai_sdk/types/chat/create_response.py new file mode 100644 index 00000000..a6320518 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/create_response.py @@ -0,0 +1,73 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .usage import Usage +from ..._models import BaseModel +from .token_logprob import TokenLogprob +from .response_message import ResponseMessage + +__all__ = ["CreateResponse", "Choice", "ChoiceLogprobs"] + + +class ChoiceLogprobs(BaseModel): + content: Optional[List[TokenLogprob]] = None + """A list of message content tokens with log probability information.""" + + refusal: Optional[List[TokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + + +class Choice(BaseModel): + finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"] + """The reason the model stopped generating tokens. + + This will be `stop` if the model hit a natural stop point or a provided stop + sequence, `length` if the maximum number of tokens specified in the request was + reached, `content_filter` if content was omitted due to a flag from our content + filters, `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + """ + + index: int + """The index of the choice in the list of choices.""" + + logprobs: Optional[ChoiceLogprobs] = None + """Log probability information for the choice.""" + + message: ResponseMessage + """A chat completion message generated by the model.""" + + +class CreateResponse(BaseModel): + id: str + """A unique identifier for the chat completion.""" + + choices: List[Choice] + """A list of chat completion choices. + + Can be more than one if `n` is greater than 1. + """ + + created: int + """The Unix timestamp (in seconds) of when the chat completion was created.""" + + model: str + """The model used for the chat completion.""" + + object: Literal["chat.completion"] + """The object type, which is always `chat.completion`.""" + + service_tier: Optional[Literal["scale", "default"]] = None + """The service tier used for processing the request.""" + + system_fingerprint: Optional[str] = None + """This fingerprint represents the backend configuration that the model runs with. 
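+
+ A monitoring sketch (assumes `create(...)` returns this model and that a
+ `logger` is available):
+
+     a = client.chat.completions.create(**params, seed=42)
+     b = client.chat.completions.create(**params, seed=42)
+     if a.system_fingerprint != b.system_fingerprint:
+         logger.warning("backend config changed between requests")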
+ + Can be used in conjunction with the `seed` request parameter to understand when + backend changes have been made that might impact determinism. + """ + + usage: Optional[Usage] = None + """Usage statistics for the completion request.""" diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call.py new file mode 100644 index 00000000..abc22e05 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/message_tool_call.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["MessageToolCall", "Function"] + + +class Function(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class MessageToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: Function + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py new file mode 100644 index 00000000..da60f69a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["MessageToolCallParam", "Function"] + + +class Function(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageToolCallParam(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[Function] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py b/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py new file mode 100644 index 00000000..497ba18c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +__all__ = ["ModelIDsSharedParam"] + +ModelIDsSharedParam: TypeAlias = Union[ + str, + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], +] diff --git a/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py b/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py new file mode 100644 index 00000000..8e83e40b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RequestMessageContentPartTextParam"] + + +class RequestMessageContentPartTextParam(TypedDict, total=False): + text: Required[str] + """The text content.""" + + type: Required[Literal["text"]] + """The type of the content part.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py new file mode 100644 index 00000000..17ca162a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJsonObject"] + + +class ResponseFormatJsonObject(BaseModel): + type: Literal["json_object"] + """The type of response format being defined. Always `json_object`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py new file mode 100644 index 00000000..5296cec4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJsonObjectParam"] + + +class ResponseFormatJsonObjectParam(TypedDict, total=False): + type: Required[Literal["json_object"]] + """The type of response format being defined. 
Always `json_object`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py new file mode 100644 index 00000000..a65bf052 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJsonSchema", "JsonSchema"] + + +class JsonSchema(BaseModel): + name: str + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None) + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + strict: Optional[bool] = None + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](/docs/guides/structured-outputs). + """ + + +class ResponseFormatJsonSchema(BaseModel): + json_schema: JsonSchema + """Structured Outputs configuration options, including a JSON Schema.""" + + type: Literal["json_schema"] + """The type of response format being defined. Always `json_schema`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py new file mode 100644 index 00000000..32d254c3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJsonSchemaParam", "JsonSchema"] + + +class JsonSchema(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema: Dict[str, object] + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + strict: Optional[bool] + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](/docs/guides/structured-outputs). 
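+
+ For example, a strict schema sketch (the schema itself is illustrative; strict
+ mode requires `additionalProperties: false` and all properties required):
+
+     response_format: ResponseFormatJsonSchemaParam = {
+         "type": "json_schema",
+         "json_schema": {
+             "name": "weather_report",
+             "strict": True,
+             "schema": {
+                 "type": "object",
+                 "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
+                 "required": ["city", "temp_c"],
+                 "additionalProperties": False,
+             },
+         },
+     }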
+ """ + + +class ResponseFormatJsonSchemaParam(TypedDict, total=False): + json_schema: Required[JsonSchema] + """Structured Outputs configuration options, including a JSON Schema.""" + + type: Required[Literal["json_schema"]] + """The type of response format being defined. Always `json_schema`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text.py b/src/digitalocean_genai_sdk/types/chat/response_format_text.py new file mode 100644 index 00000000..f0c8cfb7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(BaseModel): + type: Literal["text"] + """The type of response format being defined. Always `text`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py new file mode 100644 index 00000000..0d37573e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatTextParam"] + + +class ResponseFormatTextParam(TypedDict, total=False): + type: Required[Literal["text"]] + """The type of response format being defined. Always `text`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_message.py b/src/digitalocean_genai_sdk/types/chat/response_message.py new file mode 100644 index 00000000..940adf8f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/response_message.py @@ -0,0 +1,97 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .message_tool_call import MessageToolCall + +__all__ = ["ResponseMessage", "Annotation", "AnnotationURLCitation", "Audio", "FunctionCall"] + + +class AnnotationURLCitation(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + url: str + """The URL of the web resource.""" + + +class Annotation(BaseModel): + type: Literal["url_citation"] + """The type of the URL citation. Always `url_citation`.""" + + url_citation: AnnotationURLCitation + """A URL citation when using web search.""" + + +class Audio(BaseModel): + id: str + """Unique identifier for this audio response.""" + + data: str + """ + Base64 encoded audio bytes generated by the model, in the format specified in + the request. + """ + + expires_at: int + """ + The Unix timestamp (in seconds) for when this audio response will no longer be + accessible on the server for use in multi-turn conversations. + """ + + transcript: str + """Transcript of the audio generated by the model.""" + + +class FunctionCall(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. 
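+
+ A defensive parsing sketch (the surrounding handler is illustrative):
+
+     import json
+
+     try:
+         args = json.loads(call.arguments)
+     except json.JSONDecodeError:
+         args = None  # malformed JSON from the model; report an error instead of calling
+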
Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ResponseMessage(BaseModel): + content: Optional[str] = None + """The contents of the message.""" + + refusal: Optional[str] = None + """The refusal message generated by the model.""" + + role: Literal["assistant"] + """The role of the author of this message.""" + + annotations: Optional[List[Annotation]] = None + """ + Annotations for the message, when applicable, as when using the + [web search tool](/docs/guides/tools-web-search?api-mode=chat). + """ + + audio: Optional[Audio] = None + """ + If the audio output modality is requested, this object contains data about the + audio response from the model. [Learn more](/docs/guides/audio). + """ + + function_call: Optional[FunctionCall] = None + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the + model. + """ + + tool_calls: Optional[List[MessageToolCall]] = None + """The tool calls generated by the model, such as function calls.""" diff --git a/src/digitalocean_genai_sdk/types/chat/token_logprob.py b/src/digitalocean_genai_sdk/types/chat/token_logprob.py new file mode 100644 index 00000000..d31943f6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/token_logprob.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["TokenLogprob", "TopLogprob"] + + +class TopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class TokenLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[TopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. + """ diff --git a/src/digitalocean_genai_sdk/types/chat/usage.py b/src/digitalocean_genai_sdk/types/chat/usage.py new file mode 100644 index 00000000..1a7a1abf --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat/usage.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
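+# A consumption sketch for the models below (illustrative; `resp` stands for a
+# chat completion response with a populated `usage`):
+#
+#     u = resp.usage
+#     assert u.prompt_tokens + u.completion_tokens == u.total_tokens
+#     reasoning = u.completion_tokens_details.reasoning_tokens if u.completion_tokens_details else None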
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["Usage", "CompletionTokensDetails", "PromptTokensDetails"]
+
+
+class CompletionTokensDetails(BaseModel):
+ accepted_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that
+ appeared in the completion.
+ """
+
+ audio_tokens: Optional[int] = None
+ """Audio output tokens generated by the model."""
+
+ reasoning_tokens: Optional[int] = None
+ """Tokens generated by the model for reasoning."""
+
+ rejected_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that did
+ not appear in the completion. However, like reasoning tokens, these tokens are
+ still counted in the total completion tokens for purposes of billing, output,
+ and context window limits.
+ """
+
+
+class PromptTokensDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """Audio input tokens present in the prompt."""
+
+ cached_tokens: Optional[int] = None
+ """Cached tokens present in the prompt."""
+
+
+class Usage(BaseModel):
+ completion_tokens: int
+ """Number of tokens in the generated completion."""
+
+ prompt_tokens: int
+ """Number of tokens in the prompt."""
+
+ total_tokens: int
+ """Total number of tokens used in the request (prompt + completion)."""
+
+ completion_tokens_details: Optional[CompletionTokensDetails] = None
+ """Breakdown of tokens used in a completion."""
+
+ prompt_tokens_details: Optional[PromptTokensDetails] = None
+ """Breakdown of tokens used in the prompt."""
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py b/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
new file mode 100644
index 00000000..18b284a9
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["WebSearchContextSize"]
+
+WebSearchContextSize: TypeAlias = Literal["low", "medium", "high"]
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location.py b/src/digitalocean_genai_sdk/types/chat/web_search_location.py
new file mode 100644
index 00000000..192c4efa
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/chat/web_search_location.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["WebSearchLocation"]
+
+
+class WebSearchLocation(BaseModel):
+ city: Optional[str] = None
+ """Free text input for the city of the user, e.g. `San Francisco`."""
+
+ country: Optional[str] = None
+ """
+ The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+ the user, e.g. `US`.
+ """
+
+ region: Optional[str] = None
+ """Free text input for the region of the user, e.g. `California`."""
+
+ timezone: Optional[str] = None
+ """
+ The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+ user, e.g. `America/Los_Angeles`.
+ """
diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py b/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
new file mode 100644
index 00000000..bc4d5a4c
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
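+# A filled-in sketch of this param (all fields optional; values illustrative):
+#
+#     loc: WebSearchLocationParam = {
+#         "city": "San Francisco",
+#         "country": "US",
+#         "region": "California",
+#         "timezone": "America/Los_Angeles",
+#     }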
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["WebSearchLocationParam"] + + +class WebSearchLocationParam(TypedDict, total=False): + city: str + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: str + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: str + """Free text input for the region of the user, e.g. `California`.""" + + timezone: str + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ diff --git a/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py b/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py new file mode 100644 index 00000000..471e0eba --- /dev/null +++ b/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ChatCompletionStreamOptionsParam"] + + +class ChatCompletionStreamOptionsParam(TypedDict, total=False): + include_usage: bool + """If set, an additional chunk will be streamed before the `data: [DONE]` message. + + The `usage` field on this chunk shows the token usage statistics for the entire + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. + """ diff --git a/src/digitalocean_genai_sdk/types/comparison_filter.py b/src/digitalocean_genai_sdk/types/comparison_filter.py new file mode 100644 index 00000000..547aac28 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/comparison_filter.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ComparisonFilter"] + + +class ComparisonFilter(BaseModel): + key: str + """The key to compare against the value.""" + + type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] + """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + """ + + value: Union[str, float, bool] + """ + The value to compare against the attribute key; supports string, number, or + boolean types. + """ diff --git a/src/digitalocean_genai_sdk/types/comparison_filter_param.py b/src/digitalocean_genai_sdk/types/comparison_filter_param.py new file mode 100644 index 00000000..2df2d744 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/comparison_filter_param.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ComparisonFilterParam"] + + +class ComparisonFilterParam(TypedDict, total=False): + key: Required[str] + """The key to compare against the value.""" + + type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]] + """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. 
+ + - `eq`: equals + - `ne`: not equal + - `gt`: greater than + - `gte`: greater than or equal + - `lt`: less than + - `lte`: less than or equal + """ + + value: Required[Union[str, float, bool]] + """ + The value to compare against the attribute key; supports string, number, or + boolean types. + """ diff --git a/src/digitalocean_genai_sdk/types/completion_create_params.py b/src/digitalocean_genai_sdk/types/completion_create_params.py new file mode 100644 index 00000000..36709c57 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/completion_create_params.py @@ -0,0 +1,168 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from .stop_configuration_param import StopConfigurationParam +from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam + +__all__ = ["CompletionCreateParams"] + + +class CompletionCreateParams(TypedDict, total=False): + model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]] + """ID of the model to use. + + You can use the [List models](/docs/api-reference/models/list) API to see all of + your available models, or see our [Model overview](/docs/models) for + descriptions of them. + """ + + prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] + """ + The prompt(s) to generate completions for, encoded as a string, array of + strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during + training, so if a prompt is not specified the model will generate as if from the + beginning of a new document. + """ + + best_of: Optional[int] + """ + Generates `best_of` completions server-side and returns the "best" (the one with + the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and + `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly + consume your token quota. Use carefully and ensure that you have reasonable + settings for `max_tokens` and `stop`. + """ + + echo: Optional[bool] + """Echo back the prompt in addition to the completion""" + + frequency_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on their existing frequency in the + text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation) + """ + + logit_bias: Optional[Dict[str, int]] + """Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the GPT + tokenizer) to an associated bias value from -100 to 100. You can use this + [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + Mathematically, the bias is added to the logits generated by the model prior to + sampling. The exact effect will vary per model, but values between -1 and 1 + should decrease or increase likelihood of selection; values like -100 or 100 + should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + from being generated. 
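+
+ A request sketch using that example bias (other fields shortened):
+
+     params: CompletionCreateParams = {
+         "model": "gpt-3.5-turbo-instruct",
+         "prompt": "Once upon a time",
+         "logit_bias": {"50256": -100},  # suppress <|endoftext|>
+     }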
+ """
+
+ logprobs: Optional[int]
+ """
+ Include the log probabilities on the `logprobs` most likely output tokens, as
+ well as the chosen tokens. For example, if `logprobs` is 5, the API will return
+ a list of the 5 most likely tokens. The API will always return the `logprob` of
+ the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+ The maximum value for `logprobs` is 5.
+ """
+
+ max_tokens: Optional[int]
+ """
+ The maximum number of [tokens](/tokenizer) that can be generated in the
+ completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's
+ context length.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+ for counting tokens.
+ """
+
+ n: Optional[int]
+ """How many completions to generate for each prompt.
+
+ **Note:** Because this parameter generates many completions, it can quickly
+ consume your token quota. Use carefully and ensure that you have reasonable
+ settings for `max_tokens` and `stop`.
+ """
+
+ presence_penalty: Optional[float]
+ """Number between -2.0 and 2.0.
+
+ Positive values penalize new tokens based on whether they appear in the text so
+ far, increasing the model's likelihood to talk about new topics.
+
+ [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ """
+
+ seed: Optional[int]
+ """
+ If specified, our system will make a best effort to sample deterministically,
+ such that repeated requests with the same `seed` and parameters should return
+ the same result.
+
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+ response parameter to monitor changes in the backend.
+ """
+
+ stop: Optional[StopConfigurationParam]
+ """Up to 4 sequences where the API will stop generating further tokens.
+
+ The returned text will not contain the stop sequence.
+ """
+
+ stream: Optional[bool]
+ """Whether to stream back partial progress.
+
+ If set, tokens will be sent as data-only
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ as they become available, with the stream terminated by a `data: [DONE]`
+ message.
+ [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
+ """
+
+ stream_options: Optional[ChatCompletionStreamOptionsParam]
+ """Options for streaming response. Only set this when you set `stream: true`."""
+
+ suffix: Optional[str]
+ """The suffix that comes after a completion of inserted text.
+
+ This parameter is only supported for `gpt-3.5-turbo-instruct`.
+ """
+
+ temperature: Optional[float]
+ """What sampling temperature to use, between 0 and 2.
+
+ Higher values like 0.8 will make the output more random, while lower values like
+ 0.2 will make it more focused and deterministic.
+
+ We generally recommend altering this or `top_p` but not both.
+ """
+
+ top_p: Optional[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1
+ means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ """
+
+ user: str
+ """
+ A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
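+
+ For example (the identifier scheme is illustrative; use your own stable ids):
+
+     params["user"] = f"user-{account_id}"  # stable per end-user, not per request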
+ """ diff --git a/src/digitalocean_genai_sdk/types/completion_create_response.py b/src/digitalocean_genai_sdk/types/completion_create_response.py new file mode 100644 index 00000000..2e1028bf --- /dev/null +++ b/src/digitalocean_genai_sdk/types/completion_create_response.py @@ -0,0 +1,63 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .chat.usage import Usage + +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs"] + + +class ChoiceLogprobs(BaseModel): + text_offset: Optional[List[int]] = None + + token_logprobs: Optional[List[float]] = None + + tokens: Optional[List[str]] = None + + top_logprobs: Optional[List[Dict[str, float]]] = None + + +class Choice(BaseModel): + finish_reason: Literal["stop", "length", "content_filter"] + """The reason the model stopped generating tokens. + + This will be `stop` if the model hit a natural stop point or a provided stop + sequence, `length` if the maximum number of tokens specified in the request was + reached, or `content_filter` if content was omitted due to a flag from our + content filters. + """ + + index: int + + logprobs: Optional[ChoiceLogprobs] = None + + text: str + + +class CompletionCreateResponse(BaseModel): + id: str + """A unique identifier for the completion.""" + + choices: List[Choice] + """The list of completion choices the model generated for the input prompt.""" + + created: int + """The Unix timestamp (in seconds) of when the completion was created.""" + + model: str + """The model used for completion.""" + + object: Literal["text_completion"] + """The object type, which is always "text_completion" """ + + system_fingerprint: Optional[str] = None + """This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when + backend changes have been made that might impact determinism. + """ + + usage: Optional[Usage] = None + """Usage statistics for the completion request.""" diff --git a/src/digitalocean_genai_sdk/types/compound_filter.py b/src/digitalocean_genai_sdk/types/compound_filter.py new file mode 100644 index 00000000..bf1f793f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/compound_filter.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union +from typing_extensions import Literal, TypeAlias + +from .._models import BaseModel +from .comparison_filter import ComparisonFilter + +__all__ = ["CompoundFilter", "Filter"] + +Filter: TypeAlias = Union[ComparisonFilter, Dict[str, object]] + + +class CompoundFilter(BaseModel): + filters: List[Filter] + """Array of filters to combine. + + Items can be `ComparisonFilter` or `CompoundFilter`. + """ + + type: Literal["and", "or"] + """Type of operation: `and` or `or`.""" diff --git a/src/digitalocean_genai_sdk/types/compound_filter_param.py b/src/digitalocean_genai_sdk/types/compound_filter_param.py new file mode 100644 index 00000000..1f66a965 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/compound_filter_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
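+# A sketch combining two comparison filters (keys and values illustrative):
+#
+#     f: CompoundFilterParam = {
+#         "type": "and",
+#         "filters": [
+#             {"key": "category", "type": "eq", "value": "blog"},
+#             {"key": "views", "type": "gte", "value": 100},
+#         ],
+#     }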
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .comparison_filter_param import ComparisonFilterParam
+
+__all__ = ["CompoundFilterParam", "Filter"]
+
+Filter: TypeAlias = Union[ComparisonFilterParam, Dict[str, object]]
+
+
+class CompoundFilterParam(TypedDict, total=False):
+    filters: Required[Iterable[Filter]]
+    """Array of filters to combine.
+
+    Items can be `ComparisonFilter` or `CompoundFilter`.
+    """
+
+    type: Required[Literal["and", "or"]]
+    """Type of operation: `and` or `or`."""
diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call.py b/src/digitalocean_genai_sdk/types/computer_tool_call.py
new file mode 100644
index 00000000..b127e694
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/computer_tool_call.py
@@ -0,0 +1,198 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, TypeAlias
+
+from .._models import BaseModel
+from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck
+
+__all__ = [
+    "ComputerToolCall",
+    "Action",
+    "ActionClick",
+    "ActionDoubleClick",
+    "ActionDrag",
+    "ActionDragPath",
+    "ActionKeyPress",
+    "ActionMove",
+    "ActionScreenshot",
+    "ActionScroll",
+    "ActionType",
+    "ActionWait",
+]
+
+
+class ActionClick(BaseModel):
+    button: Literal["left", "right", "wheel", "back", "forward"]
+    """Indicates which mouse button was pressed during the click.
+
+    One of `left`, `right`, `wheel`, `back`, or `forward`.
+    """
+
+    type: Literal["click"]
+    """Specifies the event type.
+
+    For a click action, this property is always set to `click`.
+    """
+
+    x: int
+    """The x-coordinate where the click occurred."""
+
+    y: int
+    """The y-coordinate where the click occurred."""
+
+
+class ActionDoubleClick(BaseModel):
+    type: Literal["double_click"]
+    """Specifies the event type.
+
+    For a double click action, this property is always set to `double_click`.
+    """
+
+    x: int
+    """The x-coordinate where the double click occurred."""
+
+    y: int
+    """The y-coordinate where the double click occurred."""
+
+
+class ActionDragPath(BaseModel):
+    x: int
+    """The x-coordinate."""
+
+    y: int
+    """The y-coordinate."""
+
+
+class ActionDrag(BaseModel):
+    path: List[ActionDragPath]
+    """An array of coordinates representing the path of the drag action.
+
+    Coordinates will appear as an array of objects, e.g.
+
+    ```
+    [
+      { x: 100, y: 200 },
+      { x: 200, y: 300 }
+    ]
+    ```
+    """
+
+    type: Literal["drag"]
+    """Specifies the event type.
+
+    For a drag action, this property is always set to `drag`.
+    """
+
+
+class ActionKeyPress(BaseModel):
+    keys: List[str]
+    """The combination of keys the model is requesting to be pressed.
+
+    This is an array of strings, each representing a key.
+    """
+
+    type: Literal["keypress"]
+    """Specifies the event type.
+
+    For a keypress action, this property is always set to `keypress`.
+    """
+
+
+class ActionMove(BaseModel):
+    type: Literal["move"]
+    """Specifies the event type.
+
+    For a move action, this property is always set to `move`.
+    """
+
+    x: int
+    """The x-coordinate to move to."""
+
+    y: int
+    """The y-coordinate to move to."""
+
+
+class ActionScreenshot(BaseModel):
+    type: Literal["screenshot"]
+    """Specifies the event type.
+
+    For a screenshot action, this property is always set to `screenshot`.
+ """ + + +class ActionScroll(BaseModel): + scroll_x: int + """The horizontal scroll distance.""" + + scroll_y: int + """The vertical scroll distance.""" + + type: Literal["scroll"] + """Specifies the event type. + + For a scroll action, this property is always set to `scroll`. + """ + + x: int + """The x-coordinate where the scroll occurred.""" + + y: int + """The y-coordinate where the scroll occurred.""" + + +class ActionType(BaseModel): + text: str + """The text to type.""" + + type: Literal["type"] + """Specifies the event type. + + For a type action, this property is always set to `type`. + """ + + +class ActionWait(BaseModel): + type: Literal["wait"] + """Specifies the event type. + + For a wait action, this property is always set to `wait`. + """ + + +Action: TypeAlias = Union[ + ActionClick, + ActionDoubleClick, + ActionDrag, + ActionKeyPress, + ActionMove, + ActionScreenshot, + ActionScroll, + ActionType, + ActionWait, +] + + +class ComputerToolCall(BaseModel): + id: str + """The unique ID of the computer call.""" + + action: Action + """A click action.""" + + call_id: str + """An identifier used when responding to the tool call with output.""" + + pending_safety_checks: List[ComputerToolCallSafetyCheck] + """The pending safety checks for the computer call.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["computer_call"] + """The type of the computer call. Always `computer_call`.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output.py new file mode 100644 index 00000000..0133a29a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/computer_tool_call_output.py @@ -0,0 +1,50 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck + +__all__ = ["ComputerToolCallOutput", "Output"] + + +class Output(BaseModel): + type: Literal["computer_screenshot"] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ + + file_id: Optional[str] = None + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: Optional[str] = None + """The URL of the screenshot image.""" + + +class ComputerToolCallOutput(BaseModel): + call_id: str + """The ID of the computer tool call that produced the output.""" + + output: Output + """A computer screenshot image used with the computer use tool.""" + + type: Literal["computer_call_output"] + """The type of the computer tool call output. Always `computer_call_output`.""" + + id: Optional[str] = None + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Optional[List[ComputerToolCallSafetyCheck]] = None + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py new file mode 100644 index 00000000..764c4da8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam + +__all__ = ["ComputerToolCallOutputParam", "Output"] + + +class Output(TypedDict, total=False): + type: Required[Literal["computer_screenshot"]] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ + + file_id: str + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: str + """The URL of the screenshot image.""" + + +class ComputerToolCallOutputParam(TypedDict, total=False): + call_id: Required[str] + """The ID of the computer tool call that produced the output.""" + + output: Required[Output] + """A computer screenshot image used with the computer use tool.""" + + type: Required[Literal["computer_call_output"]] + """The type of the computer tool call output. Always `computer_call_output`.""" + + id: str + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Iterable[ComputerToolCallSafetyCheckParam] + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_param.py new file mode 100644 index 00000000..7fb87bfa --- /dev/null +++ b/src/digitalocean_genai_sdk/types/computer_tool_call_param.py @@ -0,0 +1,199 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam + +__all__ = [ + "ComputerToolCallParam", + "Action", + "ActionClick", + "ActionDoubleClick", + "ActionDrag", + "ActionDragPath", + "ActionKeyPress", + "ActionMove", + "ActionScreenshot", + "ActionScroll", + "ActionType", + "ActionWait", +] + + +class ActionClick(TypedDict, total=False): + button: Required[Literal["left", "right", "wheel", "back", "forward"]] + """Indicates which mouse button was pressed during the click. + + One of `left`, `right`, `wheel`, `back`, or `forward`. + """ + + type: Required[Literal["click"]] + """Specifies the event type. + + For a click action, this property is always set to `click`. + """ + + x: Required[int] + """The x-coordinate where the click occurred.""" + + y: Required[int] + """The y-coordinate where the click occurred.""" + + +class ActionDoubleClick(TypedDict, total=False): + type: Required[Literal["double_click"]] + """Specifies the event type. + + For a double click action, this property is always set to `double_click`. 
+ """ + + x: Required[int] + """The x-coordinate where the double click occurred.""" + + y: Required[int] + """The y-coordinate where the double click occurred.""" + + +class ActionDragPath(TypedDict, total=False): + x: Required[int] + """The x-coordinate.""" + + y: Required[int] + """The y-coordinate.""" + + +class ActionDrag(TypedDict, total=False): + path: Required[Iterable[ActionDragPath]] + """An array of coordinates representing the path of the drag action. + + Coordinates will appear as an array of objects, eg + + ``` + [ + + { x: 100, y: 200 }, + { x: 200, y: 300 } + ] + ``` + """ + + type: Required[Literal["drag"]] + """Specifies the event type. + + For a drag action, this property is always set to `drag`. + """ + + +class ActionKeyPress(TypedDict, total=False): + keys: Required[List[str]] + """The combination of keys the model is requesting to be pressed. + + This is an array of strings, each representing a key. + """ + + type: Required[Literal["keypress"]] + """Specifies the event type. + + For a keypress action, this property is always set to `keypress`. + """ + + +class ActionMove(TypedDict, total=False): + type: Required[Literal["move"]] + """Specifies the event type. + + For a move action, this property is always set to `move`. + """ + + x: Required[int] + """The x-coordinate to move to.""" + + y: Required[int] + """The y-coordinate to move to.""" + + +class ActionScreenshot(TypedDict, total=False): + type: Required[Literal["screenshot"]] + """Specifies the event type. + + For a screenshot action, this property is always set to `screenshot`. + """ + + +class ActionScroll(TypedDict, total=False): + scroll_x: Required[int] + """The horizontal scroll distance.""" + + scroll_y: Required[int] + """The vertical scroll distance.""" + + type: Required[Literal["scroll"]] + """Specifies the event type. + + For a scroll action, this property is always set to `scroll`. + """ + + x: Required[int] + """The x-coordinate where the scroll occurred.""" + + y: Required[int] + """The y-coordinate where the scroll occurred.""" + + +class ActionType(TypedDict, total=False): + text: Required[str] + """The text to type.""" + + type: Required[Literal["type"]] + """Specifies the event type. + + For a type action, this property is always set to `type`. + """ + + +class ActionWait(TypedDict, total=False): + type: Required[Literal["wait"]] + """Specifies the event type. + + For a wait action, this property is always set to `wait`. + """ + + +Action: TypeAlias = Union[ + ActionClick, + ActionDoubleClick, + ActionDrag, + ActionKeyPress, + ActionMove, + ActionScreenshot, + ActionScroll, + ActionType, + ActionWait, +] + + +class ComputerToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the computer call.""" + + action: Required[Action] + """A click action.""" + + call_id: Required[str] + """An identifier used when responding to the tool call with output.""" + + pending_safety_checks: Required[Iterable[ComputerToolCallSafetyCheckParam]] + """The pending safety checks for the computer call.""" + + status: Required[Literal["in_progress", "completed", "incomplete"]] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Required[Literal["computer_call"]] + """The type of the computer call. 
Always `computer_call`.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py new file mode 100644 index 00000000..e24b9f35 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["ComputerToolCallSafetyCheck"] + + +class ComputerToolCallSafetyCheck(BaseModel): + id: str + """The ID of the pending safety check.""" + + code: str + """The type of the pending safety check.""" + + message: str + """Details about the pending safety check.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py new file mode 100644 index 00000000..859d6b59 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ComputerToolCallSafetyCheckParam"] + + +class ComputerToolCallSafetyCheckParam(TypedDict, total=False): + id: Required[str] + """The ID of the pending safety check.""" + + code: Required[str] + """The type of the pending safety check.""" + + message: Required[str] + """Details about the pending safety check.""" diff --git a/src/digitalocean_genai_sdk/types/create_thread_request_param.py b/src/digitalocean_genai_sdk/types/create_thread_request_param.py new file mode 100644 index 00000000..3a8f59b4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/create_thread_request_param.py @@ -0,0 +1,130 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .threads.create_message_request_param import CreateMessageRequestParam + +__all__ = [ + "CreateThreadRequestParam", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", +] + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](/docs/api-reference/files) IDs made available to the + `code_interpreter` tool. There can be a maximum of 20 files associated with the + tool. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. 
The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, + ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, +] + + +class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + + file_ids: List[str] + """A list of [file](/docs/api-reference/files) IDs to add to the vector store. + + There can be a maximum of 10000 files in a vector store. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The [vector store](/docs/api-reference/vector-stores/object) attached to this + thread. There can be a maximum of 1 vector store attached to the thread. + """ + + vector_stores: Iterable[ToolResourcesFileSearchVectorStore] + """ + A helper to create a [vector store](/docs/api-reference/vector-stores/object) + with file_ids and attach it to this thread. There can be a maximum of 1 vector + store attached to the thread. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch + + +class CreateThreadRequestParam(TypedDict, total=False): + messages: Iterable[CreateMessageRequestParam] + """A list of [messages](/docs/api-reference/messages) to start the thread with.""" + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/digitalocean_genai_sdk/types/embedding_create_params.py new file mode 100644 index 00000000..caf65415 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/embedding_create_params.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
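The `CreateThreadRequestParam` defined above is plain `TypedDict` data, so a thread payload that seeds a vector store can be written as a literal. A hedged sketch follows; the message keys (`role`/`content`) are assumed from `CreateMessageRequestParam`, which is not part of this hunk, while the `tool_resources` keys match the TypedDicts above.

```python
# Hedged sketch of a CreateThreadRequestParam payload. The message shape is an
# assumption; the tool_resources structure is confirmed by the schema above.
thread_request = {
    "messages": [{"role": "user", "content": "Summarize the attached files."}],
    "tool_resources": {
        "file_search": {
            "vector_stores": [
                {
                    "file_ids": ["file-abc123"],
                    "chunking_strategy": {
                        "type": "static",
                        "static": {
                            "max_chunk_size_tokens": 800,  # default per the schema
                            "chunk_overlap_tokens": 400,   # must not exceed half of max
                        },
                    },
                }
            ]
        }
    },
}
```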
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["EmbeddingCreateParams"] + + +class EmbeddingCreateParams(TypedDict, total=False): + input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]] + """Input text to embed, encoded as a string or array of tokens. + + To embed multiple inputs in a single request, pass an array of strings or array + of token arrays. The input must not exceed the max input tokens for the model + (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any + array must be 2048 dimensions or less. + [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. + """ + + model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]] + """ID of the model to use. + + You can use the [List models](/docs/api-reference/models/list) API to see all of + your available models, or see our [Model overview](/docs/models) for + descriptions of them. + """ + + dimensions: int + """The number of dimensions the resulting output embeddings should have. + + Only supported in `text-embedding-3` and later models. + """ + + encoding_format: Literal["float", "base64"] + """The format to return the embeddings in. + + Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/digitalocean_genai_sdk/types/embedding_create_response.py new file mode 100644 index 00000000..e85daaba --- /dev/null +++ b/src/digitalocean_genai_sdk/types/embedding_create_response.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["EmbeddingCreateResponse", "Data", "Usage"] + + +class Data(BaseModel): + embedding: List[float] + """The embedding vector, which is a list of floats. + + The length of vector depends on the model as listed in the + [embedding guide](/docs/guides/embeddings). + """ + + index: int + """The index of the embedding in the list of embeddings.""" + + object: Literal["embedding"] + """The object type, which is always "embedding".""" + + +class Usage(BaseModel): + prompt_tokens: int + """The number of tokens used by the prompt.""" + + total_tokens: int + """The total number of tokens used by the request.""" + + +class EmbeddingCreateResponse(BaseModel): + data: List[Data] + """The list of embeddings generated by the model.""" + + model: str + """The name of the model used to generate the embedding.""" + + object: Literal["list"] + """The object type, which is always "list".""" + + usage: Usage + """The usage information for the request.""" diff --git a/src/digitalocean_genai_sdk/types/file_delete_response.py b/src/digitalocean_genai_sdk/types/file_delete_response.py new file mode 100644 index 00000000..26e2e053 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
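With both `EmbeddingCreateParams` and `EmbeddingCreateResponse` in place, a round trip can be sketched. The client class name and the `client.embeddings.create` method are assumptions based on the package and resource layout in this patch, not signatures confirmed by this hunk.

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

# `embeddings.create` is assumed from resources/embeddings.py; the parameter
# names come directly from EmbeddingCreateParams.
response = client.embeddings.create(
    input=["first document", "second document"],  # a batch of strings
    model="text-embedding-3-small",
    encoding_format="float",
)

# EmbeddingCreateResponse.data holds one Data entry per input, in order.
for item in response.data:
    print(item.index, len(item.embedding))
print(response.usage.total_tokens)
```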
+ +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["FileDeleteResponse"] + + +class FileDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["file"] diff --git a/src/digitalocean_genai_sdk/types/file_list_params.py b/src/digitalocean_genai_sdk/types/file_list_params.py new file mode 100644 index 00000000..058d874c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_list_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["FileListParams"] + + +class FileListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 10,000, and the default is 10,000. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ + + purpose: str + """Only return files with the given purpose.""" diff --git a/src/digitalocean_genai_sdk/types/file_list_response.py b/src/digitalocean_genai_sdk/types/file_list_response.py new file mode 100644 index 00000000..db9ef641 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from .._models import BaseModel +from .openai_file import OpenAIFile + +__all__ = ["FileListResponse"] + + +class FileListResponse(BaseModel): + data: List[OpenAIFile] + + first_id: str + + has_more: bool + + last_id: str + + object: str diff --git a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py new file mode 100644 index 00000000..20c945db --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import TypeAlias + +__all__ = ["FileRetrieveContentResponse"] + +FileRetrieveContentResponse: TypeAlias = str diff --git a/src/digitalocean_genai_sdk/types/file_search_ranker.py b/src/digitalocean_genai_sdk/types/file_search_ranker.py new file mode 100644 index 00000000..d4aabe5a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_search_ranker.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["FileSearchRanker"] + +FileSearchRanker: TypeAlias = Literal["auto", "default_2024_08_21"] diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call.py b/src/digitalocean_genai_sdk/types/file_search_tool_call.py new file mode 100644 index 00000000..04542379 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_search_tool_call.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
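`FileListParams` describes classic cursor pagination: pass the last seen object ID as `after` to fetch the next page. A hedged loop, assuming a `client.files.list` method named after these params:

```python
from typing import Optional

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

cursor: Optional[str] = None
while True:
    # `files.list` is assumed from the files resource; `limit`, `order`,
    # `purpose`, and `after` come straight from FileListParams.
    params = {"limit": 100, "order": "desc", "purpose": "fine-tune"}
    if cursor is not None:
        params["after"] = cursor
    page = client.files.list(**params)
    for f in page.data:
        print(f.id)
    if not page.has_more:
        break
    cursor = page.last_id  # FileListResponse exposes the next cursor as last_id
```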
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileSearchToolCall", "Result"]
+
+
+class Result(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: Optional[str] = None
+    """The unique ID of the file."""
+
+    filename: Optional[str] = None
+    """The name of the file."""
+
+    score: Optional[float] = None
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: Optional[str] = None
+    """The text that was retrieved from the file."""
+
+
+class FileSearchToolCall(BaseModel):
+    id: str
+    """The unique ID of the file search tool call."""
+
+    queries: List[str]
+    """The queries used to search for files."""
+
+    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Literal["file_search_call"]
+    """The type of the file search tool call. Always `file_search_call`."""
+
+    results: Optional[List[Result]] = None
+    """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
new file mode 100644
index 00000000..315dc90e
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["FileSearchToolCallParam", "Result"]
+
+
+class Result(TypedDict, total=False):
+    attributes: Optional[Dict[str, Union[str, float, bool]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    file_id: str
+    """The unique ID of the file."""
+
+    filename: str
+    """The name of the file."""
+
+    score: float
+    """The relevance score of the file - a value between 0 and 1."""
+
+    text: str
+    """The text that was retrieved from the file."""
+
+
+class FileSearchToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the file search tool call."""
+
+    queries: Required[List[str]]
+    """The queries used to search for files."""
+
+    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
+    """The status of the file search tool call.
+
+    One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`.
+    """
+
+    type: Required[Literal["file_search_call"]]
+    """The type of the file search tool call.
Always `file_search_call`.""" + + results: Optional[Iterable[Result]] + """The results of the file search tool call.""" diff --git a/src/digitalocean_genai_sdk/types/file_upload_params.py b/src/digitalocean_genai_sdk/types/file_upload_params.py new file mode 100644 index 00000000..5b42fc50 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/file_upload_params.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .._types import FileTypes + +__all__ = ["FileUploadParams"] + + +class FileUploadParams(TypedDict, total=False): + file: Required[FileTypes] + """The File object (not file name) to be uploaded.""" + + purpose: Required[Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]] + """The intended purpose of the uploaded file. + + One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch + API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision + fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used + for eval data sets + """ diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py new file mode 100644 index 00000000..6b7dcea7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .fine_tuning_job import FineTuningJob as FineTuningJob +from .job_list_params import JobListParams as JobListParams +from .fine_tune_method import FineTuneMethod as FineTuneMethod +from .job_create_params import JobCreateParams as JobCreateParams +from .job_list_response import JobListResponse as JobListResponse +from .fine_tune_method_param import FineTuneMethodParam as FineTuneMethodParam diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py new file mode 100644 index 00000000..6b30e048 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .permission_create_params import PermissionCreateParams as PermissionCreateParams +from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse +from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams +from .list_fine_tuning_checkpoint_permission import ( + ListFineTuningCheckpointPermission as ListFineTuningCheckpointPermission, +) diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py new file mode 100644 index 00000000..9136bf5d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
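`FileUploadParams` pairs a binary `file` with a `purpose` literal; for fine-tuning data the purpose is `fine-tune`. A hedged sketch, assuming the method is spelled `client.files.upload` to match the params name:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

# `files.upload` is inferred from FileUploadParams (Stainless names *Params
# after the method); the purpose literal is confirmed by the schema above.
with open("training_data.jsonl", "rb") as fh:
    uploaded = client.files.upload(file=fh, purpose="fine-tune")
print(uploaded.id)  # the OpenAIFile ID, reusable later as a training_file
```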
+ +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ListFineTuningCheckpointPermission", "Data"] + + +class Data(BaseModel): + id: str + """The permission identifier, which can be referenced in the API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the permission was created.""" + + object: Literal["checkpoint.permission"] + """The object type, which is always "checkpoint.permission".""" + + project_id: str + """The project identifier that the permission is for.""" + + +class ListFineTuningCheckpointPermission(BaseModel): + data: List[Data] + + has_more: bool + + object: Literal["list"] + + first_id: Optional[str] = None + + last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py new file mode 100644 index 00000000..92f98f21 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +__all__ = ["PermissionCreateParams"] + + +class PermissionCreateParams(TypedDict, total=False): + project_ids: Required[List[str]] + """The project identifiers to grant access to.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py new file mode 100644 index 00000000..1a92d912 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["PermissionDeleteResponse"] + + +class PermissionDeleteResponse(BaseModel): + id: str + """The ID of the fine-tuned model checkpoint permission that was deleted.""" + + deleted: bool + """Whether the fine-tuned model checkpoint permission was successfully deleted.""" + + object: Literal["checkpoint.permission"] + """The object type, which is always "checkpoint.permission".""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py new file mode 100644 index 00000000..6e66a867 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
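Granting checkpoint permissions combines the params and response defined above. A sketch under stated assumptions: the resource path `client.fine_tuning.checkpoints.permissions` mirrors this patch's module layout, and the positional checkpoint identifier is hypothetical.

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

# Resource path and the positional checkpoint argument are assumptions;
# `project_ids` comes from PermissionCreateParams.
perms = client.fine_tuning.checkpoints.permissions.create(
    "ft:gpt-4o-mini:acme:custom:abc123",  # hypothetical checkpoint identifier
    project_ids=["proj_abc", "proj_def"],
)
for p in perms.data:
    print(p.id, p.project_id)
```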
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["PermissionRetrieveParams"] + + +class PermissionRetrieveParams(TypedDict, total=False): + after: str + """Identifier for the last permission ID from the previous pagination request.""" + + limit: int + """Number of permissions to retrieve.""" + + order: Literal["ascending", "descending"] + """The order in which to retrieve permissions.""" + + project_id: str + """The ID of the project to get permissions for.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py new file mode 100644 index 00000000..6ad8f7a5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py @@ -0,0 +1,78 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FineTuneMethod", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"] + + +class DpoHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float, None] = None + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class Dpo(BaseModel): + hyperparameters: Optional[DpoHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class SupervisedHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class Supervised(BaseModel): + hyperparameters: Optional[SupervisedHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class FineTuneMethod(BaseModel): + dpo: Optional[Dpo] = None + """Configuration for the DPO fine-tuning method.""" + + supervised: Optional[Supervised] = None + """Configuration for the supervised fine-tuning method.""" + + type: Optional[Literal["supervised", "dpo"]] = None + """The type of method. 
Is either `supervised` or `dpo`.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py new file mode 100644 index 00000000..e28abc93 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py @@ -0,0 +1,78 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypedDict + +__all__ = ["FineTuneMethodParam", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"] + + +class DpoHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float] + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class Dpo(TypedDict, total=False): + hyperparameters: DpoHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class SupervisedHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class Supervised(TypedDict, total=False): + hyperparameters: SupervisedHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class FineTuneMethodParam(TypedDict, total=False): + dpo: Dpo + """Configuration for the DPO fine-tuning method.""" + + supervised: Supervised + """Configuration for the supervised fine-tuning method.""" + + type: Literal["supervised", "dpo"] + """The type of method. Is either `supervised` or `dpo`.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py new file mode 100644 index 00000000..29f387a1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py @@ -0,0 +1,182 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .fine_tune_method import FineTuneMethod + +__all__ = ["FineTuningJob", "Error", "Hyperparameters", "Integration", "IntegrationWandb"] + + +class Error(BaseModel): + code: str + """A machine-readable error code.""" + + message: str + """A human-readable error message.""" + + param: Optional[str] = None + """The parameter that was invalid, usually `training_file` or `validation_file`. 
+ + This field will be null if the failure was not parameter-specific. + """ + + +class Hyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class IntegrationWandb(BaseModel): + project: str + """The name of the project that the new run will be created under.""" + + entity: Optional[str] = None + """The entity to use for the run. + + This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered + WandB API key is used. + """ + + name: Optional[str] = None + """A display name to set for the run. + + If not set, we will use the Job ID as the name. + """ + + tags: Optional[List[str]] = None + """A list of tags to be attached to the newly created run. + + These tags are passed through directly to WandB. Some default tags are generated + by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + """ + + +class Integration(BaseModel): + type: Literal["wandb"] + """The type of the integration being enabled for the fine-tuning job""" + + wandb: IntegrationWandb + """The settings for your integration with Weights and Biases. + + This payload specifies the project that metrics will be sent to. Optionally, you + can set an explicit display name for your run, add tags to your run, and set a + default entity (team, username, etc) to be associated with your run. + """ + + +class FineTuningJob(BaseModel): + id: str + """The object identifier, which can be referenced in the API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" + + error: Optional[Error] = None + """ + For fine-tuning jobs that have `failed`, this will contain more information on + the cause of the failure. + """ + + fine_tuned_model: Optional[str] = None + """The name of the fine-tuned model that is being created. + + The value will be null if the fine-tuning job is still running. + """ + + finished_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the fine-tuning job was finished. + + The value will be null if the fine-tuning job is still running. + """ + + hyperparameters: Hyperparameters + """The hyperparameters used for the fine-tuning job. + + This value will only be returned when running `supervised` jobs. + """ + + model: str + """The base model that is being fine-tuned.""" + + object: Literal["fine_tuning.job"] + """The object type, which is always "fine_tuning.job".""" + + organization_id: str + """The organization that owns the fine-tuning job.""" + + result_files: List[str] + """The compiled results file ID(s) for the fine-tuning job. + + You can retrieve the results with the + [Files API](/docs/api-reference/files/retrieve-contents). 
+ """ + + seed: int + """The seed used for the fine-tuning job.""" + + status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"] + """ + The current status of the fine-tuning job, which can be either + `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + """ + + trained_tokens: Optional[int] = None + """The total number of billable tokens processed by this fine-tuning job. + + The value will be null if the fine-tuning job is still running. + """ + + training_file: str + """The file ID used for training. + + You can retrieve the training data with the + [Files API](/docs/api-reference/files/retrieve-contents). + """ + + validation_file: Optional[str] = None + """The file ID used for validation. + + You can retrieve the validation results with the + [Files API](/docs/api-reference/files/retrieve-contents). + """ + + estimated_finish: Optional[int] = None + """ + The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + finish. The value will be null if the fine-tuning job is not running. + """ + + integrations: Optional[List[Integration]] = None + """A list of integrations to enable for this fine-tuning job.""" + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + method: Optional[FineTuneMethod] = None + """The method used for fine-tuning.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py new file mode 100644 index 00000000..a538e659 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py @@ -0,0 +1,152 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from .fine_tune_method_param import FineTuneMethodParam + +__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"] + + +class JobCreateParams(TypedDict, total=False): + model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]] + """The name of the model to fine-tune. + + You can select one of the + [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + """ + + training_file: Required[str] + """The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload + your file with the purpose `fine-tune`. + + The contents of the file should differ depending on if the model uses the + [chat](/docs/api-reference/fine-tuning/chat-input), + [completions](/docs/api-reference/fine-tuning/completions-input) format, or if + the fine-tuning method uses the + [preference](/docs/api-reference/fine-tuning/preference-input) format. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + """ + + hyperparameters: Hyperparameters + """ + The hyperparameters used for the fine-tuning job. 
This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. + """ + + integrations: Optional[Iterable[Integration]] + """A list of integrations to enable for your fine-tuning job.""" + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + method: FineTuneMethodParam + """The method used for fine-tuning.""" + + seed: Optional[int] + """The seed controls the reproducibility of the job. + + Passing in the same seed and job parameters should produce the same results, but + may differ in rare cases. If a seed is not specified, one will be generated for + you. + """ + + suffix: Optional[str] + """ + A string of up to 64 characters that will be added to your fine-tuned model + name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + """ + + validation_file: Optional[str] + """The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation metrics + periodically during fine-tuning. These metrics can be viewed in the fine-tuning + results file. The same data should not be present in both train and validation + files. + + Your dataset must be formatted as a JSONL file. You must upload your file with + the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + """ + + +class Hyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class IntegrationWandb(TypedDict, total=False): + project: Required[str] + """The name of the project that the new run will be created under.""" + + entity: Optional[str] + """The entity to use for the run. + + This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered + WandB API key is used. + """ + + name: Optional[str] + """A display name to set for the run. + + If not set, we will use the Job ID as the name. + """ + + tags: List[str] + """A list of tags to be attached to the newly created run. + + These tags are passed through directly to WandB. Some default tags are generated + by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + """ + + +class Integration(TypedDict, total=False): + type: Required[Literal["wandb"]] + """The type of integration to enable. + + Currently, only "wandb" (Weights and Biases) is supported. + """ + + wandb: Required[IntegrationWandb] + """The settings for your integration with Weights and Biases. + + This payload specifies the project that metrics will be sent to. 
Optionally, you + can set an explicit display name for your run, add tags to your run, and set a + default entity (team, username, etc) to be associated with your run. + """ diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py new file mode 100644 index 00000000..b79f3ce8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import TypedDict + +__all__ = ["JobListParams"] + + +class JobListParams(TypedDict, total=False): + after: str + """Identifier for the last job from the previous pagination request.""" + + limit: int + """Number of fine-tuning jobs to retrieve.""" + + metadata: Optional[Dict[str, str]] + """Optional metadata filter. + + To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to + indicate no metadata. + """ diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py new file mode 100644 index 00000000..ea6eb6a8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .fine_tuning_job import FineTuningJob + +__all__ = ["JobListResponse"] + + +class JobListResponse(BaseModel): + data: List[FineTuningJob] + + has_more: bool + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py new file mode 100644 index 00000000..9ba11022 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .event_retrieve_params import EventRetrieveParams as EventRetrieveParams +from .event_retrieve_response import EventRetrieveResponse as EventRetrieveResponse +from .checkpoint_retrieve_params import CheckpointRetrieveParams as CheckpointRetrieveParams +from .checkpoint_retrieve_response import CheckpointRetrieveResponse as CheckpointRetrieveResponse diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py new file mode 100644 index 00000000..34666a9f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
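`JobCreateParams` plus `FineTuneMethodParam` make a DPO job a single nested literal. A hedged sketch; `client.fine_tuning.jobs.create` is assumed from the jobs resource in this patch, while every field name is taken from the TypedDicts above.

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

# `fine_tuning.jobs.create` is assumed from the resource layout; the field
# names below all come from JobCreateParams / FineTuneMethodParam.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",   # JSONL uploaded with purpose "fine-tune"
    validation_file="file-def456",
    suffix="custom-model-name",
    seed=42,
    method={
        "type": "dpo",
        "dpo": {
            "hyperparameters": {
                "batch_size": "auto",
                "beta": 0.1,
                "n_epochs": 3,
            }
        },
    },
)
print(job.id, job.status)
```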
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["CheckpointRetrieveParams"] + + +class CheckpointRetrieveParams(TypedDict, total=False): + after: str + """Identifier for the last checkpoint ID from the previous pagination request.""" + + limit: int + """Number of checkpoints to retrieve.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py new file mode 100644 index 00000000..bf0af44d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py @@ -0,0 +1,59 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["CheckpointRetrieveResponse", "Data", "DataMetrics"] + + +class DataMetrics(BaseModel): + full_valid_loss: Optional[float] = None + + full_valid_mean_token_accuracy: Optional[float] = None + + step: Optional[float] = None + + train_loss: Optional[float] = None + + train_mean_token_accuracy: Optional[float] = None + + valid_loss: Optional[float] = None + + valid_mean_token_accuracy: Optional[float] = None + + +class Data(BaseModel): + id: str + """The checkpoint identifier, which can be referenced in the API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the checkpoint was created.""" + + fine_tuned_model_checkpoint: str + """The name of the fine-tuned checkpoint model that is created.""" + + fine_tuning_job_id: str + """The name of the fine-tuning job that this checkpoint was created from.""" + + metrics: DataMetrics + """Metrics at the step number during the fine-tuning job.""" + + object: Literal["fine_tuning.job.checkpoint"] + """The object type, which is always "fine_tuning.job.checkpoint".""" + + step_number: int + """The step number that the checkpoint was created at.""" + + +class CheckpointRetrieveResponse(BaseModel): + data: List[Data] + + has_more: bool + + object: Literal["list"] + + first_id: Optional[str] = None + + last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py new file mode 100644 index 00000000..f0162e0e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EventRetrieveParams"] + + +class EventRetrieveParams(TypedDict, total=False): + after: str + """Identifier for the last event from the previous pagination request.""" + + limit: int + """Number of events to retrieve.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py new file mode 100644 index 00000000..8c22fe30 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
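Checkpoints page the same way as the other list endpoints. A sketch assuming the method is `client.fine_tuning.jobs.checkpoints.retrieve`, matching `CheckpointRetrieveParams`, with the job ID passed positionally (also an assumption):

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

client = DigitaloceanGenaiSDK()

# Method spelling and the positional job ID are assumptions; `after` and
# `limit` come from CheckpointRetrieveParams.
page = client.fine_tuning.jobs.checkpoints.retrieve("ftjob-abc123", limit=10)
for ckpt in page.data:
    print(ckpt.step_number, ckpt.metrics.train_loss, ckpt.metrics.valid_loss)
if page.has_more:
    page = client.fine_tuning.jobs.checkpoints.retrieve(
        "ftjob-abc123", limit=10, after=page.last_id
    )
```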
+ +import builtins +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["EventRetrieveResponse", "Data"] + + +class Data(BaseModel): + id: str + """The object identifier.""" + + created_at: int + """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" + + level: Literal["info", "warn", "error"] + """The log level of the event.""" + + message: str + """The message of the event.""" + + object: Literal["fine_tuning.job.event"] + """The object type, which is always "fine_tuning.job.event".""" + + data: Optional[builtins.object] = None + """The data associated with the event.""" + + type: Optional[Literal["message", "metrics"]] = None + """The type of event.""" + + +class EventRetrieveResponse(BaseModel): + data: List[Data] + + has_more: bool + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/function_object.py b/src/digitalocean_genai_sdk/types/function_object.py new file mode 100644 index 00000000..4fe27f86 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_object.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional + +from .._models import BaseModel + +__all__ = ["FunctionObject"] + + +class FunctionObject(BaseModel): + name: str + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: Optional[str] = None + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Optional[Dict[str, object]] = None + """The parameters the function accepts, described as a JSON Schema object. + + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](/docs/guides/function-calling). + """ diff --git a/src/digitalocean_genai_sdk/types/function_object_param.py b/src/digitalocean_genai_sdk/types/function_object_param.py new file mode 100644 index 00000000..1a358408 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_object_param.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["FunctionObjectParam"] + + +class FunctionObjectParam(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Dict[str, object] + """The parameters the function accepts, described as a JSON Schema object. 
+ + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](/docs/guides/function-calling). + """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call.py b/src/digitalocean_genai_sdk/types/function_tool_call.py new file mode 100644 index 00000000..ecdb4a02 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_tool_call.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["FunctionToolCall"] + + +class FunctionToolCall(BaseModel): + arguments: str + """A JSON string of the arguments to pass to the function.""" + + call_id: str + """The unique ID of the function tool call generated by the model.""" + + name: str + """The name of the function to run.""" + + type: Literal["function_call"] + """The type of the function tool call. Always `function_call`.""" + + id: Optional[str] = None + """The unique ID of the function tool call.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output.py b/src/digitalocean_genai_sdk/types/function_tool_call_output.py new file mode 100644 index 00000000..4cbe27ce --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_tool_call_output.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["FunctionToolCallOutput"] + + +class FunctionToolCallOutput(BaseModel): + call_id: str + """The unique ID of the function tool call generated by the model.""" + + output: str + """A JSON string of the output of the function tool call.""" + + type: Literal["function_call_output"] + """The type of the function tool call output. Always `function_call_output`.""" + + id: Optional[str] = None + """The unique ID of the function tool call output. + + Populated when this item is returned via API. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py new file mode 100644 index 00000000..49a573ed --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
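+# Usage sketch (illustrative): constructing the TypedDict defined below by
+# hand. Only `call_id`, `output`, and `type` are required; the IDs and payload
+# shown are hypothetical.
+#
+#     import json
+#
+#     item: FunctionToolCallOutputParam = {
+#         "call_id": "call_abc123",
+#         "output": json.dumps({"temperature_c": 21}),
+#         "type": "function_call_output",
+#     }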
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FunctionToolCallOutputParam"] + + +class FunctionToolCallOutputParam(TypedDict, total=False): + call_id: Required[str] + """The unique ID of the function tool call generated by the model.""" + + output: Required[str] + """A JSON string of the output of the function tool call.""" + + type: Required[Literal["function_call_output"]] + """The type of the function tool call output. Always `function_call_output`.""" + + id: str + """The unique ID of the function tool call output. + + Populated when this item is returned via API. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_param.py new file mode 100644 index 00000000..91e076b6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/function_tool_call_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FunctionToolCallParam"] + + +class FunctionToolCallParam(TypedDict, total=False): + arguments: Required[str] + """A JSON string of the arguments to pass to the function.""" + + call_id: Required[str] + """The unique ID of the function tool call generated by the model.""" + + name: Required[str] + """The name of the function to run.""" + + type: Required[Literal["function_call"]] + """The type of the function tool call. Always `function_call`.""" + + id: str + """The unique ID of the function tool call.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/image_create_edit_params.py b/src/digitalocean_genai_sdk/types/image_create_edit_params.py new file mode 100644 index 00000000..f84f5642 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/image_create_edit_params.py @@ -0,0 +1,60 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypedDict + +from .._types import FileTypes + +__all__ = ["ImageCreateEditParams"] + + +class ImageCreateEditParams(TypedDict, total=False): + image: Required[FileTypes] + """The image to edit. + + Must be a valid PNG file, less than 4MB, and square. If mask is not provided, + image must have transparency, which will be used as the mask. + """ + + prompt: Required[str] + """A text description of the desired image(s). + + The maximum length is 1000 characters. + """ + + mask: FileTypes + """An additional image whose fully transparent areas (e.g. + + where alpha is zero) indicate where `image` should be edited. Must be a valid + PNG file, less than 4MB, and have the same dimensions as `image`. + """ + + model: Union[str, Literal["dall-e-2"], None] + """The model to use for image generation. + + Only `dall-e-2` is supported at this time. + """ + + n: Optional[int] + """The number of images to generate. 
Must be between 1 and 10.""" + + response_format: Optional[Literal["url", "b64_json"]] + """The format in which the generated images are returned. + + Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + """ + + size: Optional[Literal["256x256", "512x512", "1024x1024"]] + """The size of the generated images. + + Must be one of `256x256`, `512x512`, or `1024x1024`. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ diff --git a/src/digitalocean_genai_sdk/types/image_create_generation_params.py b/src/digitalocean_genai_sdk/types/image_create_generation_params.py new file mode 100644 index 00000000..e8cfbb18 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/image_create_generation_params.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ImageCreateGenerationParams"] + + +class ImageCreateGenerationParams(TypedDict, total=False): + prompt: Required[str] + """A text description of the desired image(s). + + The maximum length is 1000 characters for `dall-e-2` and 4000 characters for + `dall-e-3`. + """ + + model: Union[str, Literal["dall-e-2", "dall-e-3"], None] + """The model to use for image generation.""" + + n: Optional[int] + """The number of images to generate. + + Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + """ + + quality: Literal["standard", "hd"] + """The quality of the image that will be generated. + + `hd` creates images with finer details and greater consistency across the image. + This param is only supported for `dall-e-3`. + """ + + response_format: Optional[Literal["url", "b64_json"]] + """The format in which the generated images are returned. + + Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + """ + + size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] + """The size of the generated images. + + Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one + of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + """ + + style: Optional[Literal["vivid", "natural"]] + """The style of the generated images. + + Must be one of `vivid` or `natural`. Vivid causes the model to lean towards + generating hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only supported for + `dall-e-3`. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ diff --git a/src/digitalocean_genai_sdk/types/image_create_variation_params.py b/src/digitalocean_genai_sdk/types/image_create_variation_params.py new file mode 100644 index 00000000..64245a05 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/image_create_variation_params.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
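+# Usage sketch (illustrative; the `create_variation` method name is an
+# assumption inferred from this params module): requesting two variations of
+# a local PNG.
+#
+#     result = client.images.create_variation(
+#         image=open("input.png", "rb"),  # must be a square PNG under 4MB
+#         n=2,
+#         size="1024x1024",
+#         response_format="url",
+#     )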
+ +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypedDict + +from .._types import FileTypes + +__all__ = ["ImageCreateVariationParams"] + + +class ImageCreateVariationParams(TypedDict, total=False): + image: Required[FileTypes] + """The image to use as the basis for the variation(s). + + Must be a valid PNG file, less than 4MB, and square. + """ + + model: Union[str, Literal["dall-e-2"], None] + """The model to use for image generation. + + Only `dall-e-2` is supported at this time. + """ + + n: Optional[int] + """The number of images to generate. + + Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + """ + + response_format: Optional[Literal["url", "b64_json"]] + """The format in which the generated images are returned. + + Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + """ + + size: Optional[Literal["256x256", "512x512", "1024x1024"]] + """The size of the generated images. + + Must be one of `256x256`, `512x512`, or `1024x1024`. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ diff --git a/src/digitalocean_genai_sdk/types/images_response.py b/src/digitalocean_genai_sdk/types/images_response.py new file mode 100644 index 00000000..509e0069 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/images_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel + +__all__ = ["ImagesResponse", "Data"] + + +class Data(BaseModel): + b64_json: Optional[str] = None + """ + The base64-encoded JSON of the generated image, if `response_format` is + `b64_json`. + """ + + revised_prompt: Optional[str] = None + """ + The prompt that was used to generate the image, if there was any revision to the + prompt. + """ + + url: Optional[str] = None + """The URL of the generated image, if `response_format` is `url` (default).""" + + +class ImagesResponse(BaseModel): + created: int + + data: List[Data] diff --git a/src/digitalocean_genai_sdk/types/includable.py b/src/digitalocean_genai_sdk/types/includable.py new file mode 100644 index 00000000..8b4920a2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/includable.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["Includable"] + +Includable: TypeAlias = Literal[ + "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url" +] diff --git a/src/digitalocean_genai_sdk/types/input_content.py b/src/digitalocean_genai_sdk/types/input_content.py new file mode 100644 index 00000000..04e37845 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/input_content.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias + +from .._models import BaseModel + +__all__ = ["InputContent", "InputText", "InputImage", "InputFile"] + + +class InputText(BaseModel): + text: str + """The text input to the model.""" + + type: Literal["input_text"] + """The type of the input item. 
Always `input_text`.""" + + +class InputImage(BaseModel): + detail: Literal["high", "low", "auto"] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + type: Literal["input_image"] + """The type of the input item. Always `input_image`.""" + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] = None + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ + + +class InputFile(BaseModel): + type: Literal["input_file"] + """The type of the input item. Always `input_file`.""" + + file_data: Optional[str] = None + """The content of the file to be sent to the model.""" + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + filename: Optional[str] = None + """The name of the file to be sent to the model.""" + + +InputContent: TypeAlias = Union[InputText, InputImage, InputFile] diff --git a/src/digitalocean_genai_sdk/types/input_content_param.py b/src/digitalocean_genai_sdk/types/input_content_param.py new file mode 100644 index 00000000..ed0bdf62 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/input_content_param.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["InputContentParam", "InputText", "InputImage", "InputFile"] + + +class InputText(TypedDict, total=False): + text: Required[str] + """The text input to the model.""" + + type: Required[Literal["input_text"]] + """The type of the input item. Always `input_text`.""" + + +class InputImage(TypedDict, total=False): + detail: Required[Literal["high", "low", "auto"]] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + type: Required[Literal["input_image"]] + """The type of the input item. Always `input_image`.""" + + file_id: Optional[str] + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ + + +class InputFile(TypedDict, total=False): + type: Required[Literal["input_file"]] + """The type of the input item. Always `input_file`.""" + + file_data: str + """The content of the file to be sent to the model.""" + + file_id: str + """The ID of the file to be sent to the model.""" + + filename: str + """The name of the file to be sent to the model.""" + + +InputContentParam: TypeAlias = Union[InputText, InputImage, InputFile] diff --git a/src/digitalocean_genai_sdk/types/input_message.py b/src/digitalocean_genai_sdk/types/input_message.py new file mode 100644 index 00000000..4dc5526f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/input_message.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .input_content import InputContent + +__all__ = ["InputMessage"] + + +class InputMessage(BaseModel): + content: List[InputContent] + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Literal["user", "system", "developer"] + """The role of the message input. 
One of `user`, `system`, or `developer`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always set to `message`.""" diff --git a/src/digitalocean_genai_sdk/types/input_message_param.py b/src/digitalocean_genai_sdk/types/input_message_param.py new file mode 100644 index 00000000..388c54ca --- /dev/null +++ b/src/digitalocean_genai_sdk/types/input_message_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +from .input_content_param import InputContentParam + +__all__ = ["InputMessageParam"] + + +class InputMessageParam(TypedDict, total=False): + content: Required[Iterable[InputContentParam]] + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Required[Literal["user", "system", "developer"]] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["message"] + """The type of the message input. Always set to `message`.""" diff --git a/src/digitalocean_genai_sdk/types/model.py b/src/digitalocean_genai_sdk/types/model.py new file mode 100644 index 00000000..2631ee8d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/model.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["Model"] + + +class Model(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/digitalocean_genai_sdk/types/model_delete_response.py b/src/digitalocean_genai_sdk/types/model_delete_response.py new file mode 100644 index 00000000..63b2d296 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/model_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["ModelDeleteResponse"] + + +class ModelDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: str diff --git a/src/digitalocean_genai_sdk/types/model_list_response.py b/src/digitalocean_genai_sdk/types/model_list_response.py new file mode 100644 index 00000000..8f835449 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/model_list_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
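+# Usage sketch (illustrative; the client call is assumed): listing models and
+# reading the `Model` fields defined above.
+#
+#     models = client.models.list()
+#     for model in models.data:
+#         print(model.id, model.owned_by)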
+ +from typing import List +from typing_extensions import Literal + +from .model import Model +from .._models import BaseModel + +__all__ = ["ModelListResponse"] + + +class ModelListResponse(BaseModel): + data: List[Model] + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/model_response_properties.py b/src/digitalocean_genai_sdk/types/model_response_properties.py new file mode 100644 index 00000000..547c6391 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/model_response_properties.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional + +from .._models import BaseModel + +__all__ = ["ModelResponseProperties"] + + +class ModelResponseProperties(BaseModel): + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + temperature: Optional[float] = None + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + top_p: Optional[float] = None + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + user: Optional[str] = None + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_params.py b/src/digitalocean_genai_sdk/types/moderation_classify_params.py new file mode 100644 index 00000000..bcc99a1e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/moderation_classify_params.py @@ -0,0 +1,59 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "ModerationClassifyParams", + "InputUnionMember2", + "InputUnionMember2UnionMember0", + "InputUnionMember2UnionMember0ImageURL", + "InputUnionMember2UnionMember1", +] + + +class ModerationClassifyParams(TypedDict, total=False): + input: Required[Union[str, List[str], Iterable[InputUnionMember2]]] + """Input (or inputs) to classify. + + Can be a single string, an array of strings, or an array of multi-modal input + objects similar to other models. + """ + + model: Union[ + str, + Literal[ + "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" + ], + ] + """The content moderation model you would like to use. + + Learn more in [the moderation guide](/docs/guides/moderation), and learn about + available models [here](/docs/models#moderation). 
+ """ + + +class InputUnionMember2UnionMember0ImageURL(TypedDict, total=False): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + +class InputUnionMember2UnionMember0(TypedDict, total=False): + image_url: Required[InputUnionMember2UnionMember0ImageURL] + """Contains either an image URL or a data URL for a base64 encoded image.""" + + type: Required[Literal["image_url"]] + """Always `image_url`.""" + + +class InputUnionMember2UnionMember1(TypedDict, total=False): + text: Required[str] + """A string of text to classify.""" + + type: Required[Literal["text"]] + """Always `text`.""" + + +InputUnionMember2: TypeAlias = Union[InputUnionMember2UnionMember0, InputUnionMember2UnionMember1] diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_response.py b/src/digitalocean_genai_sdk/types/moderation_classify_response.py new file mode 100644 index 00000000..cfda7318 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/moderation_classify_response.py @@ -0,0 +1,203 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel + +__all__ = [ + "ModerationClassifyResponse", + "Result", + "ResultCategories", + "ResultCategoryAppliedInputTypes", + "ResultCategoryScores", +] + + +class ResultCategories(BaseModel): + harassment: bool + """ + Content that expresses, incites, or promotes harassing language towards any + target. + """ + + harassment_threatening: bool = FieldInfo(alias="harassment/threatening") + """ + Harassment content that also includes violence or serious harm towards any + target. + """ + + hate: bool + """ + Content that expresses, incites, or promotes hate based on race, gender, + ethnicity, religion, nationality, sexual orientation, disability status, or + caste. Hateful content aimed at non-protected groups (e.g., chess players) is + harassment. + """ + + hate_threatening: bool = FieldInfo(alias="hate/threatening") + """ + Hateful content that also includes violence or serious harm towards the targeted + group based on race, gender, ethnicity, religion, nationality, sexual + orientation, disability status, or caste. + """ + + illicit: Optional[bool] = None + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing, or that gives advice or instruction on how to commit + illicit acts. For example, "how to shoplift" would fit this category. + """ + + illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None) + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing that also includes violence, or that gives advice or + instruction on the procurement of any weapon. + """ + + self_harm: bool = FieldInfo(alias="self-harm") + """ + Content that promotes, encourages, or depicts acts of self-harm, such as + suicide, cutting, and eating disorders. + """ + + self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions") + """ + Content that encourages performing acts of self-harm, such as suicide, cutting, + and eating disorders, or that gives instructions or advice on how to commit such + acts. + """ + + self_harm_intent: bool = FieldInfo(alias="self-harm/intent") + """ + Content where the speaker expresses that they are engaging or intend to engage + in acts of self-harm, such as suicide, cutting, and eating disorders. 
+ """ + + sexual: bool + """ + Content meant to arouse sexual excitement, such as the description of sexual + activity, or that promotes sexual services (excluding sex education and + wellness). + """ + + sexual_minors: bool = FieldInfo(alias="sexual/minors") + """Sexual content that includes an individual who is under 18 years old.""" + + violence: bool + """Content that depicts death, violence, or physical injury.""" + + violence_graphic: bool = FieldInfo(alias="violence/graphic") + """Content that depicts death, violence, or physical injury in graphic detail.""" + + +class ResultCategoryAppliedInputTypes(BaseModel): + harassment: List[Literal["text"]] + """The applied input type(s) for the category 'harassment'.""" + + harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening") + """The applied input type(s) for the category 'harassment/threatening'.""" + + hate: List[Literal["text"]] + """The applied input type(s) for the category 'hate'.""" + + hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening") + """The applied input type(s) for the category 'hate/threatening'.""" + + illicit: List[Literal["text"]] + """The applied input type(s) for the category 'illicit'.""" + + illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent") + """The applied input type(s) for the category 'illicit/violent'.""" + + self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm") + """The applied input type(s) for the category 'self-harm'.""" + + self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions") + """The applied input type(s) for the category 'self-harm/instructions'.""" + + self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent") + """The applied input type(s) for the category 'self-harm/intent'.""" + + sexual: List[Literal["text", "image"]] + """The applied input type(s) for the category 'sexual'.""" + + sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors") + """The applied input type(s) for the category 'sexual/minors'.""" + + violence: List[Literal["text", "image"]] + """The applied input type(s) for the category 'violence'.""" + + violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic") + """The applied input type(s) for the category 'violence/graphic'.""" + + +class ResultCategoryScores(BaseModel): + harassment: float + """The score for the category 'harassment'.""" + + harassment_threatening: float = FieldInfo(alias="harassment/threatening") + """The score for the category 'harassment/threatening'.""" + + hate: float + """The score for the category 'hate'.""" + + hate_threatening: float = FieldInfo(alias="hate/threatening") + """The score for the category 'hate/threatening'.""" + + illicit: float + """The score for the category 'illicit'.""" + + illicit_violent: float = FieldInfo(alias="illicit/violent") + """The score for the category 'illicit/violent'.""" + + self_harm: float = FieldInfo(alias="self-harm") + """The score for the category 'self-harm'.""" + + self_harm_instructions: float = FieldInfo(alias="self-harm/instructions") + """The score for the category 'self-harm/instructions'.""" + + self_harm_intent: float = FieldInfo(alias="self-harm/intent") + """The score for the category 'self-harm/intent'.""" + + sexual: float + """The score for the category 'sexual'.""" + + sexual_minors: float = FieldInfo(alias="sexual/minors") + """The score for the category 'sexual/minors'.""" + + violence: 
float + """The score for the category 'violence'.""" + + violence_graphic: float = FieldInfo(alias="violence/graphic") + """The score for the category 'violence/graphic'.""" + + +class Result(BaseModel): + categories: ResultCategories + """A list of the categories, and whether they are flagged or not.""" + + category_applied_input_types: ResultCategoryAppliedInputTypes + """ + A list of the categories along with the input type(s) that the score applies to. + """ + + category_scores: ResultCategoryScores + """A list of the categories along with their scores as predicted by the model.""" + + flagged: bool + """Whether any of the categories are flagged.""" + + +class ModerationClassifyResponse(BaseModel): + id: str + """The unique identifier for the moderation request.""" + + model: str + """The model used to generate the moderation results.""" + + results: List[Result] + """A list of moderation objects.""" diff --git a/src/digitalocean_genai_sdk/types/openai_file.py b/src/digitalocean_genai_sdk/types/openai_file.py new file mode 100644 index 00000000..a8398a35 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/openai_file.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["OpenAIFile"] + + +class OpenAIFile(BaseModel): + id: str + """The file identifier, which can be referenced in the API endpoints.""" + + bytes: int + """The size of the file, in bytes.""" + + created_at: int + """The Unix timestamp (in seconds) for when the file was created.""" + + filename: str + """The name of the file.""" + + object: Literal["file"] + """The object type, which is always `file`.""" + + purpose: Literal[ + "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision" + ] + """The intended purpose of the file. + + Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, + `fine-tune`, `fine-tune-results` and `vision`. + """ + + status: Literal["uploaded", "processed", "error"] + """Deprecated. + + The current status of the file, which can be either `uploaded`, `processed`, or + `error`. + """ + + expires_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the file will expire.""" + + status_details: Optional[str] = None + """Deprecated. + + For details on why a fine-tuning training file failed validation, see the + `error` field on `fine_tuning.job`. + """ diff --git a/src/digitalocean_genai_sdk/types/organization/__init__.py b/src/digitalocean_genai_sdk/types/organization/__init__.py new file mode 100644 index 00000000..5b34f495 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/__init__.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .invite import Invite as Invite +from .project import Project as Project +from .admin_api_key import AdminAPIKey as AdminAPIKey +from .user_list_params import UserListParams as UserListParams +from .organization_user import OrganizationUser as OrganizationUser +from .invite_list_params import InviteListParams as InviteListParams +from .user_list_response import UserListResponse as UserListResponse +from .user_update_params import UserUpdateParams as UserUpdateParams +from .project_list_params import ProjectListParams as ProjectListParams +from .usage_images_params import UsageImagesParams as UsageImagesParams +from .invite_create_params import InviteCreateParams as InviteCreateParams +from .invite_list_response import InviteListResponse as InviteListResponse +from .user_delete_response import UserDeleteResponse as UserDeleteResponse +from .project_create_params import ProjectCreateParams as ProjectCreateParams +from .project_list_response import ProjectListResponse as ProjectListResponse +from .project_update_params import ProjectUpdateParams as ProjectUpdateParams +from .invite_delete_response import InviteDeleteResponse as InviteDeleteResponse +from .usage_embeddings_params import UsageEmbeddingsParams as UsageEmbeddingsParams +from .usage_completions_params import UsageCompletionsParams as UsageCompletionsParams +from .usage_moderations_params import UsageModerationsParams as UsageModerationsParams +from .admin_api_key_list_params import AdminAPIKeyListParams as AdminAPIKeyListParams +from .usage_vector_stores_params import UsageVectorStoresParams as UsageVectorStoresParams +from .admin_api_key_create_params import AdminAPIKeyCreateParams as AdminAPIKeyCreateParams +from .admin_api_key_list_response import AdminAPIKeyListResponse as AdminAPIKeyListResponse +from .usage_audio_speeches_params import UsageAudioSpeechesParams as UsageAudioSpeechesParams +from .admin_api_key_delete_response import AdminAPIKeyDeleteResponse as AdminAPIKeyDeleteResponse +from .usage_audio_transcriptions_params import UsageAudioTranscriptionsParams as UsageAudioTranscriptionsParams +from .usage_code_interpreter_sessions_params import ( + UsageCodeInterpreterSessionsParams as UsageCodeInterpreterSessionsParams, +) diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key.py new file mode 100644 index 00000000..8a57458f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/admin_api_key.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
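+# Usage sketch (illustrative; resource path assumed from this patch's layout):
+# creating an admin API key. The plaintext `value` is typically only returned
+# at creation time; listings expose `redacted_value` instead.
+#
+#     key = client.organization.admin_api_keys.create(name="ci-key")
+#     print(key.value)           # full secret, shown once
+#     print(key.redacted_value)  # e.g. "sk-admin...xyz"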
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AdminAPIKey", "Owner"] + + +class Owner(BaseModel): + id: Optional[str] = None + + created_at: Optional[int] = None + + name: Optional[str] = None + + role: Optional[str] = None + + type: Optional[str] = None + + +class AdminAPIKey(BaseModel): + id: Optional[str] = None + + created_at: Optional[int] = None + + name: Optional[str] = None + + object: Optional[str] = None + + owner: Optional[Owner] = None + + redacted_value: Optional[str] = None + + value: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py new file mode 100644 index 00000000..dccdfb8a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AdminAPIKeyCreateParams"] + + +class AdminAPIKeyCreateParams(TypedDict, total=False): + name: Required[str] diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py new file mode 100644 index 00000000..b752558c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AdminAPIKeyDeleteResponse"] + + +class AdminAPIKeyDeleteResponse(BaseModel): + id: Optional[str] = None + + deleted: Optional[bool] = None + + object: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py new file mode 100644 index 00000000..c3b3f510 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, TypedDict + +__all__ = ["AdminAPIKeyListParams"] + + +class AdminAPIKeyListParams(TypedDict, total=False): + after: Optional[str] + """Return keys with IDs that come after this ID in the pagination order.""" + + limit: int + """Maximum number of keys to return.""" + + order: Literal["asc", "desc"] + """Order results by creation time, ascending or descending.""" diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py new file mode 100644 index 00000000..8ef9beb7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
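+# Usage sketch (illustrative; the client call is assumed): cursor pagination
+# over admin API keys using the `has_more` / `last_id` fields defined below.
+#
+#     after = None
+#     while True:
+#         page = client.organization.admin_api_keys.list(after=after, limit=20)
+#         for key in page.data or []:
+#             print(key.id, key.name)
+#         if not page.has_more:
+#             break
+#         after = page.last_id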
+ +from typing import List, Optional + +from ..._models import BaseModel +from .admin_api_key import AdminAPIKey + +__all__ = ["AdminAPIKeyListResponse"] + + +class AdminAPIKeyListResponse(BaseModel): + data: Optional[List[AdminAPIKey]] = None + + first_id: Optional[str] = None + + has_more: Optional[bool] = None + + last_id: Optional[str] = None + + object: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/invite.py b/src/digitalocean_genai_sdk/types/organization/invite.py new file mode 100644 index 00000000..fd495caf --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/invite.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Invite", "Project"] + + +class Project(BaseModel): + id: Optional[str] = None + """Project's public ID""" + + role: Optional[Literal["member", "owner"]] = None + """Project membership role""" + + +class Invite(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + email: str + """The email address of the individual to whom the invite was sent""" + + expires_at: int + """The Unix timestamp (in seconds) of when the invite expires.""" + + invited_at: int + """The Unix timestamp (in seconds) of when the invite was sent.""" + + object: Literal["organization.invite"] + """The object type, which is always `organization.invite`""" + + role: Literal["owner", "reader"] + """`owner` or `reader`""" + + status: Literal["accepted", "expired", "pending"] + """`accepted`, `expired`, or `pending`""" + + accepted_at: Optional[int] = None + """The Unix timestamp (in seconds) of when the invite was accepted.""" + + projects: Optional[List[Project]] = None + """The projects that were granted membership upon acceptance of the invite.""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py b/src/digitalocean_genai_sdk/types/organization/invite_create_params.py new file mode 100644 index 00000000..7709003f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/invite_create_params.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InviteCreateParams", "Project"] + + +class InviteCreateParams(TypedDict, total=False): + email: Required[str] + """Send an email to this address""" + + role: Required[Literal["reader", "owner"]] + """`owner` or `reader`""" + + projects: Iterable[Project] + """ + An array of projects to which membership is granted at the same time the org + invite is accepted. If omitted, the user will be invited to the default project + for compatibility with legacy behavior. + """ + + +class Project(TypedDict, total=False): + id: Required[str] + """Project's public ID""" + + role: Required[Literal["member", "owner"]] + """Project membership role""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py b/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py new file mode 100644 index 00000000..52bd47b9 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
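+# Usage sketch (illustrative; method names assumed): creating an invite with a
+# project grant, then revoking it. The IDs and email address are hypothetical.
+#
+#     invite = client.organization.invites.create(
+#         email="dev@example.com",
+#         role="reader",
+#         projects=[{"id": "proj_123", "role": "member"}],
+#     )
+#     deleted = client.organization.invites.delete(invite.id)
+#     assert deleted.deleted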
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InviteDeleteResponse"] + + +class InviteDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["organization.invite.deleted"] + """The object type, which is always `organization.invite.deleted`""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py b/src/digitalocean_genai_sdk/types/organization/invite_list_params.py new file mode 100644 index 00000000..678510d6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/invite_list_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["InviteListParams"] + + +class InviteListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py b/src/digitalocean_genai_sdk/types/organization/invite_list_response.py new file mode 100644 index 00000000..2b646289 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/invite_list_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .invite import Invite +from ..._models import BaseModel + +__all__ = ["InviteListResponse"] + + +class InviteListResponse(BaseModel): + data: List[Invite] + + object: Literal["list"] + """The object type, which is always `list`""" + + first_id: Optional[str] = None + """The first `invite_id` in the retrieved `list`""" + + has_more: Optional[bool] = None + """ + The `has_more` property is used for pagination to indicate there are additional + results. + """ + + last_id: Optional[str] = None + """The last `invite_id` in the retrieved `list`""" diff --git a/src/digitalocean_genai_sdk/types/organization/organization_user.py b/src/digitalocean_genai_sdk/types/organization/organization_user.py new file mode 100644 index 00000000..890833f1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/organization_user.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
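+# Usage sketch (illustrative; the client call is assumed): paging through the
+# organization's users and reading the fields defined below.
+#
+#     page = client.organization.users.list(limit=20)
+#     for user in page.data:
+#         print(user.email, user.role)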
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OrganizationUser"] + + +class OrganizationUser(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + added_at: int + """The Unix timestamp (in seconds) of when the user was added.""" + + email: str + """The email address of the user""" + + name: str + """The name of the user""" + + object: Literal["organization.user"] + """The object type, which is always `organization.user`""" + + role: Literal["owner", "reader"] + """`owner` or `reader`""" diff --git a/src/digitalocean_genai_sdk/types/organization/project.py b/src/digitalocean_genai_sdk/types/organization/project.py new file mode 100644 index 00000000..731e8609 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/project.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Project"] + + +class Project(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + created_at: int + """The Unix timestamp (in seconds) of when the project was created.""" + + name: str + """The name of the project. This appears in reporting.""" + + object: Literal["organization.project"] + """The object type, which is always `organization.project`""" + + status: Literal["active", "archived"] + """`active` or `archived`""" + + archived_at: Optional[int] = None + """The Unix timestamp (in seconds) of when the project was archived or `null`.""" diff --git a/src/digitalocean_genai_sdk/types/organization/project_create_params.py b/src/digitalocean_genai_sdk/types/organization/project_create_params.py new file mode 100644 index 00000000..0c18bc5b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/project_create_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ProjectCreateParams"] + + +class ProjectCreateParams(TypedDict, total=False): + name: Required[str] + """The friendly name of the project, this name appears in reports.""" diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_params.py b/src/digitalocean_genai_sdk/types/organization/project_list_params.py new file mode 100644 index 00000000..f55fb8a3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/project_list_params.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ProjectListParams"] + + +class ProjectListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + include_archived: bool + """If `true` returns all projects including those that have been `archived`. + + Archived projects are not included by default. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_response.py b/src/digitalocean_genai_sdk/types/organization/project_list_response.py new file mode 100644 index 00000000..24a79f63 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/project_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from .project import Project +from ..._models import BaseModel + +__all__ = ["ProjectListResponse"] + + +class ProjectListResponse(BaseModel): + data: List[Project] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/project_update_params.py b/src/digitalocean_genai_sdk/types/organization/project_update_params.py new file mode 100644 index 00000000..0ba1984a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/project_update_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ProjectUpdateParams"] + + +class ProjectUpdateParams(TypedDict, total=False): + name: Required[str] + """The updated name of the project, this name appears in reports.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py new file mode 100644 index 00000000..4b0e0f9b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
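+# Usage sketch (illustrative; method names assumed): creating a project, then
+# listing projects including archived ones with the params defined in the
+# sibling modules above.
+#
+#     project = client.organization.projects.create(name="Research")
+#     page = client.organization.projects.list(include_archived=True, limit=20)
+#     for p in page.data:
+#         print(p.id, p.name, p.status)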
+ +from __future__ import annotations + +from .api_key import APIKey as APIKey +from .rate_limit import RateLimit as RateLimit +from .project_user import ProjectUser as ProjectUser +from .service_account import ServiceAccount as ServiceAccount +from .user_add_params import UserAddParams as UserAddParams +from .user_list_params import UserListParams as UserListParams +from .user_list_response import UserListResponse as UserListResponse +from .user_update_params import UserUpdateParams as UserUpdateParams +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .user_delete_response import UserDeleteResponse as UserDeleteResponse +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .rate_limit_list_params import RateLimitListParams as RateLimitListParams +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .rate_limit_list_response import RateLimitListResponse as RateLimitListResponse +from .rate_limit_update_params import RateLimitUpdateParams as RateLimitUpdateParams +from .service_account_list_params import ServiceAccountListParams as ServiceAccountListParams +from .service_account_create_params import ServiceAccountCreateParams as ServiceAccountCreateParams +from .service_account_list_response import ServiceAccountListResponse as ServiceAccountListResponse +from .service_account_create_response import ServiceAccountCreateResponse as ServiceAccountCreateResponse +from .service_account_delete_response import ServiceAccountDeleteResponse as ServiceAccountDeleteResponse diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key.py new file mode 100644 index 00000000..276f6d9b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/api_key.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .project_user import ProjectUser +from .service_account import ServiceAccount + +__all__ = ["APIKey", "Owner"] + + +class Owner(BaseModel): + service_account: Optional[ServiceAccount] = None + """Represents an individual service account in a project.""" + + type: Optional[Literal["user", "service_account"]] = None + """`user` or `service_account`""" + + user: Optional[ProjectUser] = None + """Represents an individual user in a project.""" + + +class APIKey(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + created_at: int + """The Unix timestamp (in seconds) of when the API key was created""" + + name: str + """The name of the API key""" + + object: Literal["organization.project.api_key"] + """The object type, which is always `organization.project.api_key`""" + + owner: Owner + + redacted_value: str + """The redacted value of the API key""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py new file mode 100644 index 00000000..c3ec64bd --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
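+# Usage sketch (illustrative; resource path assumed): listing a project's API
+# keys and inspecting the `Owner` union from the sibling `api_key` module.
+#
+#     page = client.organization.projects.api_keys.list("proj_123", limit=20)
+#     for key in page.data:
+#         principal = key.owner.user or key.owner.service_account
+#         print(key.redacted_value, key.owner.type, principal and principal.id)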
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["organization.project.api_key.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py new file mode 100644 index 00000000..422a2851 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py new file mode 100644 index 00000000..669de6c6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from .api_key import APIKey +from ...._models import BaseModel + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + data: List[APIKey] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py b/src/digitalocean_genai_sdk/types/organization/projects/project_user.py new file mode 100644 index 00000000..afcdb514 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/project_user.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ProjectUser"] + + +class ProjectUser(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + added_at: int + """The Unix timestamp (in seconds) of when the project was added.""" + + email: str + """The email address of the user""" + + name: str + """The name of the user""" + + object: Literal["organization.project.user"] + """The object type, which is always `organization.project.user`""" + + role: Literal["owner", "member"] + """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py new file mode 100644 index 00000000..1a9795f5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
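# --- Editor's sketch (not part of the generated patch) ----------------------
# Reading the polymorphic `Owner` on the `APIKey` model defined above: `type`
# indicates which of `user` / `service_account` is populated. The client class
# name, the `api_keys.list(project_id, ...)` signature, and the IDs are
# assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()

keys = client.organization.projects.api_keys.list("proj_123", limit=20)
for key in keys.data:
    if key.owner.type == "user" and key.owner.user is not None:
        print(f"{key.redacted_value}: user {key.owner.user.email}")
    elif key.owner.type == "service_account" and key.owner.service_account is not None:
        print(f"{key.redacted_value}: service account {key.owner.service_account.name}")
# -----------------------------------------------------------------------------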
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RateLimit"] + + +class RateLimit(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + max_requests_per_1_minute: int + """The maximum requests per minute.""" + + max_tokens_per_1_minute: int + """The maximum tokens per minute.""" + + model: str + """The model this rate limit applies to.""" + + object: Literal["project.rate_limit"] + """The object type, which is always `project.rate_limit`""" + + batch_1_day_max_input_tokens: Optional[int] = None + """The maximum batch input tokens per day. Only present for relevant models.""" + + max_audio_megabytes_per_1_minute: Optional[int] = None + """The maximum audio megabytes per minute. Only present for relevant models.""" + + max_images_per_1_minute: Optional[int] = None + """The maximum images per minute. Only present for relevant models.""" + + max_requests_per_1_day: Optional[int] = None + """The maximum requests per day. Only present for relevant models.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py new file mode 100644 index 00000000..aa007e5f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["RateLimitListParams"] + + +class RateLimitListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, beginning with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + limit: int + """A limit on the number of objects to be returned. The default is 100.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py new file mode 100644 index 00000000..f2133f3e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
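# --- Editor's sketch (not part of the generated patch) ----------------------
# Listing per-model rate limits with `RateLimitListParams` above; the optional
# caps on `RateLimit` are `None` unless relevant to the model. Client class
# name, method path, and IDs are assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()

limits = client.organization.projects.rate_limits.list("proj_123", limit=100)
for rl in limits.data:
    summary = f"{rl.model}: {rl.max_requests_per_1_minute} req/min, {rl.max_tokens_per_1_minute} tokens/min"
    if rl.batch_1_day_max_input_tokens is not None:
        summary += f", {rl.batch_1_day_max_input_tokens} batch input tokens/day"
    print(summary)
# -----------------------------------------------------------------------------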
+ +from typing import List +from typing_extensions import Literal + +from ...._models import BaseModel +from .rate_limit import RateLimit + +__all__ = ["RateLimitListResponse"] + + +class RateLimitListResponse(BaseModel): + data: List[RateLimit] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py new file mode 100644 index 00000000..a303d6f4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RateLimitUpdateParams"] + + +class RateLimitUpdateParams(TypedDict, total=False): + project_id: Required[str] + + batch_1_day_max_input_tokens: int + """The maximum batch input tokens per day. Only relevant for certain models.""" + + max_audio_megabytes_per_1_minute: int + """The maximum audio megabytes per minute. Only relevant for certain models.""" + + max_images_per_1_minute: int + """The maximum images per minute. Only relevant for certain models.""" + + max_requests_per_1_day: int + """The maximum requests per day. Only relevant for certain models.""" + + max_requests_per_1_minute: int + """The maximum requests per minute.""" + + max_tokens_per_1_minute: int + """The maximum tokens per minute.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account.py new file mode 100644 index 00000000..9200ba11 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ServiceAccount"] + + +class ServiceAccount(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints""" + + created_at: int + """The Unix timestamp (in seconds) of when the service account was created""" + + name: str + """The name of the service account""" + + object: Literal["organization.project.service_account"] + """The object type, which is always `organization.project.service_account`""" + + role: Literal["owner", "member"] + """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py new file mode 100644 index 00000000..409dcba5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
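# --- Editor's sketch (not part of the generated patch) ----------------------
# Updating a rate limit per `RateLimitUpdateParams` above: `project_id` is
# Required; the remaining fields are optional overrides. The
# `update(rate_limit_id, ...)` signature and the IDs are assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()

updated = client.organization.projects.rate_limits.update(
    "rl_abc",               # hypothetical rate limit ID
    project_id="proj_123",  # hypothetical project ID
    max_requests_per_1_minute=500,
)
print(updated)
# -----------------------------------------------------------------------------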
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ServiceAccountCreateParams"] + + +class ServiceAccountCreateParams(TypedDict, total=False): + name: Required[str] + """The name of the service account being created.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py new file mode 100644 index 00000000..e7757a8a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ServiceAccountCreateResponse", "APIKey"] + + +class APIKey(BaseModel): + id: str + + created_at: int + + name: str + + object: Literal["organization.project.service_account.api_key"] + """The object type, which is always `organization.project.service_account.api_key`""" + + value: str + + +class ServiceAccountCreateResponse(BaseModel): + id: str + + api_key: APIKey + + created_at: int + + name: str + + object: Literal["organization.project.service_account"] + + role: Literal["member"] + """Service accounts can only have one role of type `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py new file mode 100644 index 00000000..28d04e10 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ServiceAccountDeleteResponse"] + + +class ServiceAccountDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["organization.project.service_account.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py new file mode 100644 index 00000000..7f808e28 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ServiceAccountListParams"] + + +class ServiceAccountListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py new file mode 100644 index 00000000..0818c8c8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ...._models import BaseModel +from .service_account import ServiceAccount + +__all__ = ["ServiceAccountListResponse"] + + +class ServiceAccountListResponse(BaseModel): + data: List[ServiceAccount] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py new file mode 100644 index 00000000..85f38c0c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UserAddParams"] + + +class UserAddParams(TypedDict, total=False): + role: Required[Literal["owner", "member"]] + """`owner` or `member`""" + + user_id: Required[str] + """The ID of the user.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py new file mode 100644 index 00000000..7ac68cc5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["UserDeleteResponse"] + + +class UserDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["organization.project.user.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py new file mode 100644 index 00000000..d561e907 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["UserListParams"] + + +class UserListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py new file mode 100644 index 00000000..1f8993ad --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ...._models import BaseModel +from .project_user import ProjectUser + +__all__ = ["UserListResponse"] + + +class UserListResponse(BaseModel): + data: List[ProjectUser] + + first_id: str + + has_more: bool + + last_id: str + + object: str diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py new file mode 100644 index 00000000..08b3e1a4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UserUpdateParams"] + + +class UserUpdateParams(TypedDict, total=False): + project_id: Required[str] + + role: Required[Literal["owner", "member"]] + """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py new file mode 100644 index 00000000..819ffc37 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageAudioSpeechesParams"] + + +class UsageAudioSpeechesParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. 
+ """ + + project_ids: List[str] + """Return only usage for these projects.""" + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py new file mode 100644 index 00000000..318f85a3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageAudioTranscriptionsParams"] + + +class UsageAudioTranscriptionsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py new file mode 100644 index 00000000..24322abe --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageCodeInterpreterSessionsParams"] + + +class UsageCodeInterpreterSessionsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. 
+ """ + + project_ids: List[str] + """Return only usage for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py new file mode 100644 index 00000000..8bd94d39 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageCompletionsParams"] + + +class UsageCompletionsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + batch: bool + """If `true`, return batch jobs only. + + If `false`, return non-batch jobs only. By default, return both. + """ + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch` + or any combination of them. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py b/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py new file mode 100644 index 00000000..c4a71264 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageEmbeddingsParams"] + + +class UsageEmbeddingsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + """ + + limit: int + """Specifies the number of buckets to return. 
+ + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py b/src/digitalocean_genai_sdk/types/organization/usage_images_params.py new file mode 100644 index 00000000..31f2a31f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_images_params.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageImagesParams"] + + +class UsageImagesParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`, + `source` or any combination of them. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" + + sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] + """Return only usages for these image sizes. + + Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792` + or any combination of them. + """ + + sources: List[Literal["image.generation", "image.edit", "image.variation"]] + """Return only usages for these sources. + + Possible values are `image.generation`, `image.edit`, `image.variation` or any + combination of them. + """ + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py b/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py new file mode 100644 index 00000000..438fca8f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
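# --- Editor's sketch (not part of the generated patch) ----------------------
# A usage query shaped by `UsageImagesParams` above: daily image usage for the
# past week, grouped by model and source. `start_time` (Unix seconds) is the
# only required field. The client class name and the
# `organization.usage.images(...)` method name are assumptions inferred from
# the params type.
import time

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()

buckets = client.organization.usage.images(
    start_time=int(time.time()) - 7 * 86_400,  # inclusive lower bound
    bucket_width="1d",                         # one bucket per day
    group_by=["model", "source"],
    limit=7,                                   # max 31 when bucket_width="1d"
)
print(buckets)
# -----------------------------------------------------------------------------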
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageModerationsParams"] + + +class UsageModerationsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + api_key_ids: List[str] + """Return only usage for these API keys.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + models: List[str] + """Return only usage for these models.""" + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" + + user_ids: List[str] + """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py b/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py new file mode 100644 index 00000000..dc25f126 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UsageVectorStoresParams"] + + +class UsageVectorStoresParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + bucket_width: Literal["1m", "1h", "1d"] + """Width of each time bucket in response. + + Currently `1m`, `1h` and `1d` are supported, default to `1d`. + """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id"]] + """Group the usage data by the specified fields. + + Support fields include `project_id`. + """ + + limit: int + """Specifies the number of buckets to return. + + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + """ + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only usage for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/user_delete_response.py new file mode 100644 index 00000000..5baab3bf --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/user_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
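# --- Editor's sketch (not part of the generated patch) ----------------------
# Paging through usage results: per the docstrings above, the `page` param
# carries the `next_page` cursor from the previous response. The client class
# name, the `organization.usage.vector_stores(...)` method, and the
# `next_page` attribute on the response object are all assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()

cursor = None
while True:
    params = {"start_time": 1_735_689_600, "bucket_width": "1d"}  # example epoch
    if cursor:
        params["page"] = cursor
    resp = client.organization.usage.vector_stores(**params)
    print(resp)
    cursor = getattr(resp, "next_page", None)  # assumed cursor field
    if not cursor:
        break
# -----------------------------------------------------------------------------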
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["UserDeleteResponse"] + + +class UserDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["organization.user.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/user_list_params.py new file mode 100644 index 00000000..c7ad6c74 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/user_list_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["UserListParams"] + + +class UserListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + emails: List[str] + """Filter by the email address of users.""" + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/user_list_response.py new file mode 100644 index 00000000..73aaf45b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/user_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .organization_user import OrganizationUser + +__all__ = ["UserListResponse"] + + +class UserListResponse(BaseModel): + data: List[OrganizationUser] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/user_update_params.py new file mode 100644 index 00000000..bc276120 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization/user_update_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UserUpdateParams"] + + +class UserUpdateParams(TypedDict, total=False): + role: Required[Literal["owner", "reader"]] + """`owner` or `reader`""" diff --git a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py b/src/digitalocean_genai_sdk/types/organization_get_costs_params.py new file mode 100644 index 00000000..e114aa0f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization_get_costs_params.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["OrganizationGetCostsParams"] + + +class OrganizationGetCostsParams(TypedDict, total=False): + start_time: Required[int] + """Start time (Unix seconds) of the query time range, inclusive.""" + + bucket_width: Literal["1d"] + """Width of each time bucket in response. + + Currently only `1d` is supported, default to `1d`. 
+ """ + + end_time: int + """End time (Unix seconds) of the query time range, exclusive.""" + + group_by: List[Literal["project_id", "line_item"]] + """Group the costs by the specified fields. + + Support fields include `project_id`, `line_item` and any combination of them. + """ + + limit: int + """A limit on the number of buckets to be returned. + + Limit can range between 1 and 180, and the default is 7. + """ + + page: str + """A cursor for use in pagination. + + Corresponding to the `next_page` field from the previous response. + """ + + project_ids: List[str] + """Return only costs for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py new file mode 100644 index 00000000..36b79e57 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py @@ -0,0 +1,87 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +from .audit_log_event_type import AuditLogEventType + +__all__ = ["OrganizationListAuditLogsParams", "EffectiveAt"] + + +class OrganizationListAuditLogsParams(TypedDict, total=False): + actor_emails: List[str] + """Return only events performed by users with these emails.""" + + actor_ids: List[str] + """Return only events performed by these actors. + + Can be a user ID, a service account ID, or an api key tracking ID. + """ + + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + effective_at: EffectiveAt + """Return only events whose `effective_at` (Unix seconds) is in this range.""" + + event_types: List[AuditLogEventType] + """Return only events with a `type` in one of these values. + + For example, `project.created`. For all options, see the documentation for the + [audit log object](/docs/api-reference/audit-logs/object). + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + project_ids: List[str] + """Return only events for these projects.""" + + resource_ids: List[str] + """Return only events performed on these targets. + + For example, a project ID updated. + """ + + +class EffectiveAt(TypedDict, total=False): + gt: int + """ + Return only events whose `effective_at` (Unix seconds) is greater than this + value. + """ + + gte: int + """ + Return only events whose `effective_at` (Unix seconds) is greater than or equal + to this value. + """ + + lt: int + """Return only events whose `effective_at` (Unix seconds) is less than this value.""" + + lte: int + """ + Return only events whose `effective_at` (Unix seconds) is less than or equal to + this value. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py new file mode 100644 index 00000000..751ec527 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py @@ -0,0 +1,433 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .audit_log_actor_user import AuditLogActorUser +from .audit_log_event_type import AuditLogEventType + +__all__ = [ + "OrganizationListAuditLogsResponse", + "Data", + "DataActor", + "DataActorAPIKey", + "DataActorAPIKeyServiceAccount", + "DataActorSession", + "DataAPIKeyCreated", + "DataAPIKeyCreatedData", + "DataAPIKeyDeleted", + "DataAPIKeyUpdated", + "DataAPIKeyUpdatedChangesRequested", + "DataInviteAccepted", + "DataInviteDeleted", + "DataInviteSent", + "DataInviteSentData", + "DataLoginFailed", + "DataLogoutFailed", + "DataOrganizationUpdated", + "DataOrganizationUpdatedChangesRequested", + "DataOrganizationUpdatedChangesRequestedSettings", + "DataProject", + "DataProjectArchived", + "DataProjectCreated", + "DataProjectCreatedData", + "DataProjectUpdated", + "DataProjectUpdatedChangesRequested", + "DataRateLimitDeleted", + "DataRateLimitUpdated", + "DataRateLimitUpdatedChangesRequested", + "DataServiceAccountCreated", + "DataServiceAccountCreatedData", + "DataServiceAccountDeleted", + "DataServiceAccountUpdated", + "DataServiceAccountUpdatedChangesRequested", + "DataUserAdded", + "DataUserAddedData", + "DataUserDeleted", + "DataUserUpdated", + "DataUserUpdatedChangesRequested", +] + + +class DataActorAPIKeyServiceAccount(BaseModel): + id: Optional[str] = None + """The service account id.""" + + +class DataActorAPIKey(BaseModel): + id: Optional[str] = None + """The tracking id of the API key.""" + + service_account: Optional[DataActorAPIKeyServiceAccount] = None + """The service account that performed the audit logged action.""" + + type: Optional[Literal["user", "service_account"]] = None + """The type of API key. Can be either `user` or `service_account`.""" + + user: Optional[AuditLogActorUser] = None + """The user who performed the audit logged action.""" + + +class DataActorSession(BaseModel): + ip_address: Optional[str] = None + """The IP address from which the action was performed.""" + + user: Optional[AuditLogActorUser] = None + """The user who performed the audit logged action.""" + + +class DataActor(BaseModel): + api_key: Optional[DataActorAPIKey] = None + """The API Key used to perform the audit logged action.""" + + session: Optional[DataActorSession] = None + """The session in which the audit logged action was performed.""" + + type: Optional[Literal["session", "api_key"]] = None + """The type of actor. Is either `session` or `api_key`.""" + + +class DataAPIKeyCreatedData(BaseModel): + scopes: Optional[List[str]] = None + """A list of scopes allowed for the API key, e.g. 
`["api.model.request"]`""" + + +class DataAPIKeyCreated(BaseModel): + id: Optional[str] = None + """The tracking ID of the API key.""" + + data: Optional[DataAPIKeyCreatedData] = None + """The payload used to create the API key.""" + + +class DataAPIKeyDeleted(BaseModel): + id: Optional[str] = None + """The tracking ID of the API key.""" + + +class DataAPIKeyUpdatedChangesRequested(BaseModel): + scopes: Optional[List[str]] = None + """A list of scopes allowed for the API key, e.g. `["api.model.request"]`""" + + +class DataAPIKeyUpdated(BaseModel): + id: Optional[str] = None + """The tracking ID of the API key.""" + + changes_requested: Optional[DataAPIKeyUpdatedChangesRequested] = None + """The payload used to update the API key.""" + + +class DataInviteAccepted(BaseModel): + id: Optional[str] = None + """The ID of the invite.""" + + +class DataInviteDeleted(BaseModel): + id: Optional[str] = None + """The ID of the invite.""" + + +class DataInviteSentData(BaseModel): + email: Optional[str] = None + """The email invited to the organization.""" + + role: Optional[str] = None + """The role the email was invited to be. Is either `owner` or `member`.""" + + +class DataInviteSent(BaseModel): + id: Optional[str] = None + """The ID of the invite.""" + + data: Optional[DataInviteSentData] = None + """The payload used to create the invite.""" + + +class DataLoginFailed(BaseModel): + error_code: Optional[str] = None + """The error code of the failure.""" + + error_message: Optional[str] = None + """The error message of the failure.""" + + +class DataLogoutFailed(BaseModel): + error_code: Optional[str] = None + """The error code of the failure.""" + + error_message: Optional[str] = None + """The error message of the failure.""" + + +class DataOrganizationUpdatedChangesRequestedSettings(BaseModel): + threads_ui_visibility: Optional[str] = None + """ + Visibility of the threads page which shows messages created with the Assistants + API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. + """ + + usage_dashboard_visibility: Optional[str] = None + """ + Visibility of the usage dashboard which shows activity and costs for your + organization. One of `ANY_ROLE` or `OWNERS`. 
+ """ + + +class DataOrganizationUpdatedChangesRequested(BaseModel): + description: Optional[str] = None + """The organization description.""" + + name: Optional[str] = None + """The organization name.""" + + settings: Optional[DataOrganizationUpdatedChangesRequestedSettings] = None + + title: Optional[str] = None + """The organization title.""" + + +class DataOrganizationUpdated(BaseModel): + id: Optional[str] = None + """The organization ID.""" + + changes_requested: Optional[DataOrganizationUpdatedChangesRequested] = None + """The payload used to update the organization settings.""" + + +class DataProject(BaseModel): + id: Optional[str] = None + """The project ID.""" + + name: Optional[str] = None + """The project title.""" + + +class DataProjectArchived(BaseModel): + id: Optional[str] = None + """The project ID.""" + + +class DataProjectCreatedData(BaseModel): + name: Optional[str] = None + """The project name.""" + + title: Optional[str] = None + """The title of the project as seen on the dashboard.""" + + +class DataProjectCreated(BaseModel): + id: Optional[str] = None + """The project ID.""" + + data: Optional[DataProjectCreatedData] = None + """The payload used to create the project.""" + + +class DataProjectUpdatedChangesRequested(BaseModel): + title: Optional[str] = None + """The title of the project as seen on the dashboard.""" + + +class DataProjectUpdated(BaseModel): + id: Optional[str] = None + """The project ID.""" + + changes_requested: Optional[DataProjectUpdatedChangesRequested] = None + """The payload used to update the project.""" + + +class DataRateLimitDeleted(BaseModel): + id: Optional[str] = None + """The rate limit ID""" + + +class DataRateLimitUpdatedChangesRequested(BaseModel): + batch_1_day_max_input_tokens: Optional[int] = None + """The maximum batch input tokens per day. Only relevant for certain models.""" + + max_audio_megabytes_per_1_minute: Optional[int] = None + """The maximum audio megabytes per minute. Only relevant for certain models.""" + + max_images_per_1_minute: Optional[int] = None + """The maximum images per minute. Only relevant for certain models.""" + + max_requests_per_1_day: Optional[int] = None + """The maximum requests per day. Only relevant for certain models.""" + + max_requests_per_1_minute: Optional[int] = None + """The maximum requests per minute.""" + + max_tokens_per_1_minute: Optional[int] = None + """The maximum tokens per minute.""" + + +class DataRateLimitUpdated(BaseModel): + id: Optional[str] = None + """The rate limit ID""" + + changes_requested: Optional[DataRateLimitUpdatedChangesRequested] = None + """The payload used to update the rate limits.""" + + +class DataServiceAccountCreatedData(BaseModel): + role: Optional[str] = None + """The role of the service account. Is either `owner` or `member`.""" + + +class DataServiceAccountCreated(BaseModel): + id: Optional[str] = None + """The service account ID.""" + + data: Optional[DataServiceAccountCreatedData] = None + """The payload used to create the service account.""" + + +class DataServiceAccountDeleted(BaseModel): + id: Optional[str] = None + """The service account ID.""" + + +class DataServiceAccountUpdatedChangesRequested(BaseModel): + role: Optional[str] = None + """The role of the service account. 
Is either `owner` or `member`.""" + + +class DataServiceAccountUpdated(BaseModel): + id: Optional[str] = None + """The service account ID.""" + + changes_requested: Optional[DataServiceAccountUpdatedChangesRequested] = None + """The payload used to update the service account.""" + + +class DataUserAddedData(BaseModel): + role: Optional[str] = None + """The role of the user. Is either `owner` or `member`.""" + + +class DataUserAdded(BaseModel): + id: Optional[str] = None + """The user ID.""" + + data: Optional[DataUserAddedData] = None + """The payload used to add the user to the project.""" + + +class DataUserDeleted(BaseModel): + id: Optional[str] = None + """The user ID.""" + + +class DataUserUpdatedChangesRequested(BaseModel): + role: Optional[str] = None + """The role of the user. Is either `owner` or `member`.""" + + +class DataUserUpdated(BaseModel): + id: Optional[str] = None + """The project ID.""" + + changes_requested: Optional[DataUserUpdatedChangesRequested] = None + """The payload used to update the user.""" + + +class Data(BaseModel): + id: str + """The ID of this log.""" + + actor: DataActor + """The actor who performed the audit logged action.""" + + effective_at: int + """The Unix timestamp (in seconds) of the event.""" + + type: AuditLogEventType + """The event type.""" + + api_key_created: Optional[DataAPIKeyCreated] = FieldInfo(alias="api_key.created", default=None) + """The details for events with this `type`.""" + + api_key_deleted: Optional[DataAPIKeyDeleted] = FieldInfo(alias="api_key.deleted", default=None) + """The details for events with this `type`.""" + + api_key_updated: Optional[DataAPIKeyUpdated] = FieldInfo(alias="api_key.updated", default=None) + """The details for events with this `type`.""" + + invite_accepted: Optional[DataInviteAccepted] = FieldInfo(alias="invite.accepted", default=None) + """The details for events with this `type`.""" + + invite_deleted: Optional[DataInviteDeleted] = FieldInfo(alias="invite.deleted", default=None) + """The details for events with this `type`.""" + + invite_sent: Optional[DataInviteSent] = FieldInfo(alias="invite.sent", default=None) + """The details for events with this `type`.""" + + login_failed: Optional[DataLoginFailed] = FieldInfo(alias="login.failed", default=None) + """The details for events with this `type`.""" + + logout_failed: Optional[DataLogoutFailed] = FieldInfo(alias="logout.failed", default=None) + """The details for events with this `type`.""" + + organization_updated: Optional[DataOrganizationUpdated] = FieldInfo(alias="organization.updated", default=None) + """The details for events with this `type`.""" + + project: Optional[DataProject] = None + """The project that the action was scoped to. + + Absent for actions not scoped to projects.
+ """ + + project_archived: Optional[DataProjectArchived] = FieldInfo(alias="project.archived", default=None) + """The details for events with this `type`.""" + + project_created: Optional[DataProjectCreated] = FieldInfo(alias="project.created", default=None) + """The details for events with this `type`.""" + + project_updated: Optional[DataProjectUpdated] = FieldInfo(alias="project.updated", default=None) + """The details for events with this `type`.""" + + rate_limit_deleted: Optional[DataRateLimitDeleted] = FieldInfo(alias="rate_limit.deleted", default=None) + """The details for events with this `type`.""" + + rate_limit_updated: Optional[DataRateLimitUpdated] = FieldInfo(alias="rate_limit.updated", default=None) + """The details for events with this `type`.""" + + service_account_created: Optional[DataServiceAccountCreated] = FieldInfo( + alias="service_account.created", default=None + ) + """The details for events with this `type`.""" + + service_account_deleted: Optional[DataServiceAccountDeleted] = FieldInfo( + alias="service_account.deleted", default=None + ) + """The details for events with this `type`.""" + + service_account_updated: Optional[DataServiceAccountUpdated] = FieldInfo( + alias="service_account.updated", default=None + ) + """The details for events with this `type`.""" + + user_added: Optional[DataUserAdded] = FieldInfo(alias="user.added", default=None) + """The details for events with this `type`.""" + + user_deleted: Optional[DataUserDeleted] = FieldInfo(alias="user.deleted", default=None) + """The details for events with this `type`.""" + + user_updated: Optional[DataUserUpdated] = FieldInfo(alias="user.updated", default=None) + """The details for events with this `type`.""" + + +class OrganizationListAuditLogsResponse(BaseModel): + data: List[Data] + + first_id: str + + has_more: bool + + last_id: str + + object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/output_message.py b/src/digitalocean_genai_sdk/types/output_message.py new file mode 100644 index 00000000..4db6e72e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/output_message.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, TypeAlias + +from .._models import BaseModel + +__all__ = [ + "OutputMessage", + "Content", + "ContentOutputText", + "ContentOutputTextAnnotation", + "ContentOutputTextAnnotationFileCitation", + "ContentOutputTextAnnotationURLCitation", + "ContentOutputTextAnnotationFilePath", + "ContentRefusal", +] + + +class ContentOutputTextAnnotationFileCitation(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_citation"] + """The type of the file citation. Always `file_citation`.""" + + +class ContentOutputTextAnnotationURLCitation(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + type: Literal["url_citation"] + """The type of the URL citation. 
Always `url_citation`.""" + + url: str + """The URL of the web resource.""" + + +class ContentOutputTextAnnotationFilePath(BaseModel): + file_id: str + """The ID of the file.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_path"] + """The type of the file path. Always `file_path`.""" + + +ContentOutputTextAnnotation: TypeAlias = Union[ + ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath +] + + +class ContentOutputText(BaseModel): + annotations: List[ContentOutputTextAnnotation] + """The annotations of the text output.""" + + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +class ContentRefusal(BaseModel): + refusal: str + """The refusal explanation from the model.""" + + type: Literal["refusal"] + """The type of the refusal. Always `refusal`.""" + + +Content: TypeAlias = Union[ContentOutputText, ContentRefusal] + + +class OutputMessage(BaseModel): + id: str + """The unique ID of the output message.""" + + content: List[Content] + """The content of the output message.""" + + role: Literal["assistant"] + """The role of the output message. Always `assistant`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + type: Literal["message"] + """The type of the output message. Always `message`.""" diff --git a/src/digitalocean_genai_sdk/types/output_message_param.py b/src/digitalocean_genai_sdk/types/output_message_param.py new file mode 100644 index 00000000..83f13e18 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/output_message_param.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "OutputMessageParam", + "Content", + "ContentOutputText", + "ContentOutputTextAnnotation", + "ContentOutputTextAnnotationFileCitation", + "ContentOutputTextAnnotationURLCitation", + "ContentOutputTextAnnotationFilePath", + "ContentRefusal", +] + + +class ContentOutputTextAnnotationFileCitation(TypedDict, total=False): + file_id: Required[str] + """The ID of the file.""" + + index: Required[int] + """The index of the file in the list of files.""" + + type: Required[Literal["file_citation"]] + """The type of the file citation. Always `file_citation`.""" + + +class ContentOutputTextAnnotationURLCitation(TypedDict, total=False): + end_index: Required[int] + """The index of the last character of the URL citation in the message.""" + + start_index: Required[int] + """The index of the first character of the URL citation in the message.""" + + title: Required[str] + """The title of the web resource.""" + + type: Required[Literal["url_citation"]] + """The type of the URL citation. Always `url_citation`.""" + + url: Required[str] + """The URL of the web resource.""" + + +class ContentOutputTextAnnotationFilePath(TypedDict, total=False): + file_id: Required[str] + """The ID of the file.""" + + index: Required[int] + """The index of the file in the list of files.""" + + type: Required[Literal["file_path"]] + """The type of the file path.
diff --git a/src/digitalocean_genai_sdk/types/output_message_param.py b/src/digitalocean_genai_sdk/types/output_message_param.py
new file mode 100644
index 00000000..83f13e18
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/output_message_param.py
@@ -0,0 +1,104 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "OutputMessageParam",
+    "Content",
+    "ContentOutputText",
+    "ContentOutputTextAnnotation",
+    "ContentOutputTextAnnotationFileCitation",
+    "ContentOutputTextAnnotationURLCitation",
+    "ContentOutputTextAnnotationFilePath",
+    "ContentRefusal",
+]
+
+
+class ContentOutputTextAnnotationFileCitation(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_citation"]]
+    """The type of the file citation. Always `file_citation`."""
+
+
+class ContentOutputTextAnnotationURLCitation(TypedDict, total=False):
+    end_index: Required[int]
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: Required[int]
+    """The index of the first character of the URL citation in the message."""
+
+    title: Required[str]
+    """The title of the web resource."""
+
+    type: Required[Literal["url_citation"]]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: Required[str]
+    """The URL of the web resource."""
+
+
+class ContentOutputTextAnnotationFilePath(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    index: Required[int]
+    """The index of the file in the list of files."""
+
+    type: Required[Literal["file_path"]]
+    """The type of the file path. Always `file_path`."""
+
+
+ContentOutputTextAnnotation: TypeAlias = Union[
+    ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath
+]
+
+
+class ContentOutputText(TypedDict, total=False):
+    annotations: Required[Iterable[ContentOutputTextAnnotation]]
+    """The annotations of the text output."""
+
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
+
+
+class ContentRefusal(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal explanation from the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the refusal. Always `refusal`."""
+
+
+Content: TypeAlias = Union[ContentOutputText, ContentRefusal]
+
+
+class OutputMessageParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the output message."""
+
+    content: Required[Iterable[Content]]
+    """The content of the output message."""
+
+    role: Required[Literal["assistant"]]
+    """The role of the output message. Always `assistant`."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+    type: Required[Literal["message"]]
+    """The type of the output message. Always `message`."""
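Editorial aside (not part of the generated patch): `OutputMessageParam` is the request-side twin of `OutputMessage`. Because it is a `TypedDict`, the payload stays a plain dict at runtime; the annotation only buys static checking. Values below are hypothetical.

from digitalocean_genai_sdk.types.output_message_param import OutputMessageParam

# A plain dict that type checkers can verify against the schema above; at
# runtime this is exactly the JSON object sent to the API.
param: OutputMessageParam = {
    "id": "msg_abc123",
    "content": [{"annotations": [], "text": "Hello!", "type": "output_text"}],
    "role": "assistant",
    "status": "completed",
    "type": "message",
}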
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
new file mode 100644
index 00000000..df105bac
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/realtime_create_session_params.py
@@ -0,0 +1,230 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal, TypedDict
+
+from .voice_ids_shared_param import VoiceIDsSharedParam
+
+__all__ = [
+    "RealtimeCreateSessionParams",
+    "InputAudioNoiseReduction",
+    "InputAudioTranscription",
+    "Tool",
+    "TurnDetection",
+]
+
+
+class RealtimeCreateSessionParams(TypedDict, total=False):
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: InputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: InputAudioTranscription
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    instructions: str
+    """The default system instructions (i.e.
+
+    system message) prepended to model calls. This field allows the client to guide
+    the model on desired responses. The model can be instructed on response content
+    and format (e.g. "be extremely succinct", "act friendly", "here are examples of
+    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+    into your voice", "laugh frequently"). The instructions are not guaranteed to be
+    followed by the model, but they provide guidance to the model on the desired
+    behavior.
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"]]
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    model: Literal[
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+    ]
+    """The Realtime model used for this session."""
+
+    output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of output audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
+    sampled at a rate of 24kHz.
+    """
+
+    temperature: float
+    """Sampling temperature for the model, limited to [0.6, 1.2].
+
+    For audio models a temperature of 0.8 is highly recommended for best
+    performance.
+    """
+
+    tool_choice: str
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Iterable[Tool]
+    """Tools (functions) available to the model."""
+
+    turn_detection: TurnDetection
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+    voice: VoiceIDsSharedParam
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+ + `near_field` is for close-talking microphones such as headphones, `far_field` is + for far-field microphones such as laptop or conference room microphones. + """ + + +class InputAudioTranscription(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + + model: str + """ + The model to use for transcription, current options are `gpt-4o-transcribe`, + `gpt-4o-mini-transcribe`, and `whisper-1`. + """ + + prompt: str + """ + An optional text to guide the model's style or continue a previous audio + segment. For `whisper-1`, the + [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For + `gpt-4o-transcribe` models, the prompt is a free text string, for example + "expect words related to technology". + """ + + +class Tool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(TypedDict, total=False): + create_response: bool + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + eagerness: Literal["low", "medium", "high", "auto"] + """Used only for `semantic_vad` mode. + + The eagerness of the model to respond. `low` will wait longer for the user to + continue speaking, `high` will respond more quickly. `auto` is the default and + is equivalent to `medium`. + """ + + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: int + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: int + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: float + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + type: Literal["server_vad", "semantic_vad"] + """Type of turn detection.""" diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_session_response.py new file mode 100644 index 00000000..1b7bc03c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/realtime_create_session_response.py @@ -0,0 +1,151 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Union, Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .voice_ids_shared import VoiceIDsShared + +__all__ = ["RealtimeCreateSessionResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class ClientSecret(BaseModel): + expires_at: int + """Timestamp for when the token expires. + + Currently, all tokens expire after one minute. + """ + + value: str + """ + Ephemeral key usable in client environments to authenticate connections to the + Realtime API. Use this in client-side environments rather than a standard API + token, which should only be used server-side. + """ + + +class InputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class RealtimeCreateSessionResponse(BaseModel): + client_secret: ClientSecret + """Ephemeral key returned by the API.""" + + input_audio_format: Optional[str] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. 
+
+    Note that the server sets default instructions which will be used if this field
+    is not set and are visible in the `session.created` event at the start of the
+    session.
+    """
+
+    max_response_output_tokens: Union[int, Literal["inf"], None] = None
+    """
+    Maximum number of output tokens for a single assistant response, inclusive of
+    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+    """
+
+    modalities: Optional[List[Literal["text", "audio"]]] = None
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    output_audio_format: Optional[str] = None
+    """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
+    temperature: Optional[float] = None
+    """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
+    tool_choice: Optional[str] = None
+    """How the model chooses tools.
+
+    Options are `auto`, `none`, `required`, or specify a function.
+    """
+
+    tools: Optional[List[Tool]] = None
+    """Tools (functions) available to the model."""
+
+    turn_detection: Optional[TurnDetection] = None
+    """Configuration for turn detection.
+
+    Can be set to `null` to turn off. Server VAD means that the model will detect
+    the start and end of speech based on audio volume and respond at the end of user
+    speech.
+    """
+
+    voice: Optional[VoiceIDsShared] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+    """
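Editorial aside (not part of the generated patch): the `client_secret` on this response is the ephemeral credential a browser or mobile client presents to the Realtime API. A minimal sketch with hypothetical values; in practice the object comes back from the session-create endpoint rather than being built by hand.

from digitalocean_genai_sdk.types.realtime_create_session_response import (
    ClientSecret,
    RealtimeCreateSessionResponse,
)

# Hand-built stand-in for an actual API response, shown only to illustrate
# the field access pattern.
session = RealtimeCreateSessionResponse(
    client_secret=ClientSecret(expires_at=1735689600, value="ek_hypothetical")
)
token = session.client_secret.value  # hand to the client; expires after ~1 minute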
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
new file mode 100644
index 00000000..21912679
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py
@@ -0,0 +1,149 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = [
+    "RealtimeCreateTranscriptionSessionParams",
+    "InputAudioNoiseReduction",
+    "InputAudioTranscription",
+    "TurnDetection",
+]
+
+
+class RealtimeCreateTranscriptionSessionParams(TypedDict, total=False):
+    include: List[str]
+    """The set of items to include in the transcription. Currently available items are:
+
+    - `item.input_audio_transcription.logprobs`
+    """
+
+    input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
+    """The format of input audio.
+
+    Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
+    be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
+    byte order.
+    """
+
+    input_audio_noise_reduction: InputAudioNoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    input_audio_transcription: InputAudioTranscription
+    """Configuration for input audio transcription.
+
+    The client can optionally set the language and prompt for transcription; these
+    offer additional guidance to the transcription service.
+    """
+
+    modalities: List[Literal["text", "audio"]]
+    """The set of modalities the model can respond with.
+
+    To disable audio, set this to ["text"].
+    """
+
+    turn_detection: TurnDetection
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response. Server VAD means that the model will detect the start
+    and end of speech based on audio volume and respond at the end of user speech.
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+
+class InputAudioNoiseReduction(TypedDict, total=False):
+    type: Literal["near_field", "far_field"]
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class InputAudioTranscription(TypedDict, total=False):
+    language: str
+    """The language of the input audio.
+
+    Supplying the input language in
+    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+    format will improve accuracy and latency.
+    """
+
+    model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]
+    """
+    The model to use for transcription, current options are `gpt-4o-transcribe`,
+    `gpt-4o-mini-transcribe`, and `whisper-1`.
+    """
+
+    prompt: str
+    """
+    An optional text to guide the model's style or continue a previous audio
+    segment. For `whisper-1`, the
+    [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For
+    `gpt-4o-transcribe` models, the prompt is a free text string, for example
+    "expect words related to technology".
+    """
+
+
+class TurnDetection(TypedDict, total=False):
+    create_response: bool
+    """Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+
+    Not available for transcription sessions.
+    """
+
+    eagerness: Literal["low", "medium", "high", "auto"]
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`.
+    """
+
+    interrupt_response: bool
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs. Not available for transcription sessions.
+    """
+
+    prefix_padding_ms: int
+    """Used only for `server_vad` mode.
+
+    Amount of audio to include before the VAD detected speech (in milliseconds).
+    Defaults to 300ms.
+    """
+
+    silence_duration_ms: int
+    """Used only for `server_vad` mode.
+
+    Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+    With shorter values the model will respond more quickly, but may jump in on
+    short pauses from the user.
+    """
+
+    threshold: float
+    """Used only for `server_vad` mode.
+ + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + type: Literal["server_vad", "semantic_vad"] + """Type of turn detection.""" diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py new file mode 100644 index 00000000..bbd0b9de --- /dev/null +++ b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["RealtimeCreateTranscriptionSessionResponse", "ClientSecret", "InputAudioTranscription", "TurnDetection"] + + +class ClientSecret(BaseModel): + expires_at: int + """Timestamp for when the token expires. + + Currently, all tokens expire after one minute. + """ + + value: str + """ + Ephemeral key usable in client environments to authenticate connections to the + Realtime API. Use this in client-side environments rather than a standard API + token, which should only be used server-side. + """ + + +class InputAudioTranscription(BaseModel): + language: Optional[str] = None + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + + model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None + """The model to use for transcription. + + Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`. + """ + + prompt: Optional[str] = None + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](/docs/guides/speech-to-text#prompting) should match the audio + language. + """ + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class RealtimeCreateTranscriptionSessionResponse(BaseModel): + client_secret: ClientSecret + """Ephemeral key returned by the API. + + Only present when the session is created on the server via REST API. + """ + + input_audio_format: Optional[str] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """Configuration of the transcription model.""" + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. 
+ """ + + turn_detection: Optional[TurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ diff --git a/src/digitalocean_genai_sdk/types/reasoning_effort.py b/src/digitalocean_genai_sdk/types/reasoning_effort.py new file mode 100644 index 00000000..ace21b67 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/reasoning_effort.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal, TypeAlias + +__all__ = ["ReasoningEffort"] + +ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] diff --git a/src/digitalocean_genai_sdk/types/reasoning_item.py b/src/digitalocean_genai_sdk/types/reasoning_item.py new file mode 100644 index 00000000..28a64183 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/reasoning_item.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ReasoningItem", "Summary"] + + +class Summary(BaseModel): + text: str + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Literal["summary_text"] + """The type of the object. Always `summary_text`.""" + + +class ReasoningItem(BaseModel): + id: str + """The unique identifier of the reasoning content.""" + + summary: List[Summary] + """Reasoning text contents.""" + + type: Literal["reasoning"] + """The type of the object. Always `reasoning`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ diff --git a/src/digitalocean_genai_sdk/types/reasoning_item_param.py b/src/digitalocean_genai_sdk/types/reasoning_item_param.py new file mode 100644 index 00000000..4d2a0504 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/reasoning_item_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ReasoningItemParam", "Summary"] + + +class Summary(TypedDict, total=False): + text: Required[str] + """ + A short summary of the reasoning used by the model when generating the response. + """ + + type: Required[Literal["summary_text"]] + """The type of the object. Always `summary_text`.""" + + +class ReasoningItemParam(TypedDict, total=False): + id: Required[str] + """The unique identifier of the reasoning content.""" + + summary: Required[Iterable[Summary]] + """Reasoning text contents.""" + + type: Required[Literal["reasoning"]] + """The type of the object. Always `reasoning`.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/response.py b/src/digitalocean_genai_sdk/types/response.py new file mode 100644 index 00000000..523eedfc --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response.py @@ -0,0 +1,142 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from .._utils import PropertyInfo +from .._models import BaseModel +from .output_message import OutputMessage +from .reasoning_item import ReasoningItem +from .computer_tool_call import ComputerToolCall +from .function_tool_call import FunctionToolCall +from .response_properties import ResponseProperties +from .web_search_tool_call import WebSearchToolCall +from .file_search_tool_call import FileSearchToolCall +from .model_response_properties import ModelResponseProperties + +__all__ = [ + "Response", + "ResponseError", + "ResponseIncompleteDetails", + "ResponseOutput", + "ResponseUsage", + "ResponseUsageInputTokensDetails", + "ResponseUsageOutputTokensDetails", +] + + +class ResponseError(BaseModel): + code: Literal[ + "server_error", + "rate_limit_exceeded", + "invalid_prompt", + "vector_store_timeout", + "invalid_image", + "invalid_image_format", + "invalid_base64_image", + "invalid_image_url", + "image_too_large", + "image_too_small", + "image_parse_error", + "image_content_policy_violation", + "invalid_image_mode", + "image_file_too_large", + "unsupported_image_media_type", + "empty_image_file", + "failed_to_download_image", + "image_file_not_found", + ] + """The error code for the response.""" + + message: str + """A human-readable description of the error.""" + + +class ResponseIncompleteDetails(BaseModel): + reason: Optional[Literal["max_output_tokens", "content_filter"]] = None + """The reason why the response is incomplete.""" + + +ResponseOutput: TypeAlias = Annotated[ + Union[OutputMessage, FileSearchToolCall, FunctionToolCall, WebSearchToolCall, ComputerToolCall, ReasoningItem], + PropertyInfo(discriminator="type"), +] + + +class ResponseUsageInputTokensDetails(BaseModel): + cached_tokens: int + """The number of tokens that were retrieved from the cache. + + [More on prompt caching](/docs/guides/prompt-caching). + """ + + +class ResponseUsageOutputTokensDetails(BaseModel): + reasoning_tokens: int + """The number of reasoning tokens.""" + + +class ResponseUsage(BaseModel): + input_tokens: int + """The number of input tokens.""" + + input_tokens_details: ResponseUsageInputTokensDetails + """A detailed breakdown of the input tokens.""" + + output_tokens: int + """The number of output tokens.""" + + output_tokens_details: ResponseUsageOutputTokensDetails + """A detailed breakdown of the output tokens.""" + + total_tokens: int + """The total number of tokens used.""" + + +class Response(ModelResponseProperties, ResponseProperties): + id: str + """Unique identifier for this Response.""" + + created_at: float + """Unix timestamp (in seconds) of when this Response was created.""" + + error: Optional[ResponseError] = None + """An error object returned when the model fails to generate a Response.""" + + incomplete_details: Optional[ResponseIncompleteDetails] = None + """Details about why the response is incomplete.""" + + object: Literal["response"] + """The object type of this resource - always set to `response`.""" + + output: List[ResponseOutput] + """An array of content items generated by the model. 
+ + - The length and order of items in the `output` array is dependent on the + model's response. + - Rather than accessing the first item in the `output` array and assuming it's + an `assistant` message with the content generated by the model, you might + consider using the `output_text` property where supported in SDKs. + """ + + parallel_tool_calls: bool + """Whether to allow the model to run tool calls in parallel.""" + + output_text: Optional[str] = None + """ + SDK-only convenience property that contains the aggregated text output from all + `output_text` items in the `output` array, if any are present. Supported in the + Python and JavaScript SDKs. + """ + + status: Optional[Literal["completed", "failed", "in_progress", "incomplete"]] = None + """The status of the response generation. + + One of `completed`, `failed`, `in_progress`, or `incomplete`. + """ + + usage: Optional[ResponseUsage] = None + """ + Represents token usage details including input tokens, output tokens, a + breakdown of output tokens, and the total tokens used. + """ diff --git a/src/digitalocean_genai_sdk/types/response_create_params.py b/src/digitalocean_genai_sdk/types/response_create_params.py new file mode 100644 index 00000000..878e53a5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response_create_params.py @@ -0,0 +1,494 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .includable import Includable +from .reasoning_effort import ReasoningEffort +from .input_content_param import InputContentParam +from .input_message_param import InputMessageParam +from .output_message_param import OutputMessageParam +from .reasoning_item_param import ReasoningItemParam +from .compound_filter_param import CompoundFilterParam +from .comparison_filter_param import ComparisonFilterParam +from .computer_tool_call_param import ComputerToolCallParam +from .function_tool_call_param import FunctionToolCallParam +from .web_search_tool_call_param import WebSearchToolCallParam +from .file_search_tool_call_param import FileSearchToolCallParam +from .chat.web_search_context_size import WebSearchContextSize +from .chat.web_search_location_param import WebSearchLocationParam +from .chat.response_format_text_param import ResponseFormatTextParam +from .computer_tool_call_output_param import ComputerToolCallOutputParam +from .function_tool_call_output_param import FunctionToolCallOutputParam +from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam + +__all__ = [ + "ResponseCreateParams", + "InputInputItemList", + "InputInputItemListMessage", + "InputInputItemListItemReference", + "Reasoning", + "Text", + "TextFormat", + "TextFormatTextResponseFormatJsonSchema", + "ToolChoice", + "ToolChoiceToolChoiceTypes", + "ToolChoiceToolChoiceFunction", + "Tool", + "ToolFileSearchTool", + "ToolFileSearchToolFilters", + "ToolFileSearchToolRankingOptions", + "ToolFunctionTool", + "ToolComputerTool", + "ToolWebSearchTool", + "ToolWebSearchToolUserLocation", +] + + +class ResponseCreateParams(TypedDict, total=False): + input: Required[Union[str, Iterable[InputInputItemList]]] + """Text, image, or file inputs to the model, used to generate a response. 
+
+    Learn more:
+
+    - [Text inputs and outputs](/docs/guides/text)
+    - [Image inputs](/docs/guides/images)
+    - [File inputs](/docs/guides/pdf-files)
+    - [Conversation state](/docs/guides/conversation-state)
+    - [Function calling](/docs/guides/function-calling)
+    """
+
+    model: Required[
+        Union[
+            Literal[
+                "o3-mini",
+                "o3-mini-2025-01-31",
+                "o1",
+                "o1-2024-12-17",
+                "o1-preview",
+                "o1-preview-2024-09-12",
+                "o1-mini",
+                "o1-mini-2024-09-12",
+                "gpt-4o",
+                "gpt-4o-2024-11-20",
+                "gpt-4o-2024-08-06",
+                "gpt-4o-2024-05-13",
+                "gpt-4o-audio-preview",
+                "gpt-4o-audio-preview-2024-10-01",
+                "gpt-4o-audio-preview-2024-12-17",
+                "gpt-4o-mini-audio-preview",
+                "gpt-4o-mini-audio-preview-2024-12-17",
+                "gpt-4o-search-preview",
+                "gpt-4o-mini-search-preview",
+                "gpt-4o-search-preview-2025-03-11",
+                "gpt-4o-mini-search-preview-2025-03-11",
+                "chatgpt-4o-latest",
+                "gpt-4o-mini",
+                "gpt-4o-mini-2024-07-18",
+                "gpt-4-turbo",
+                "gpt-4-turbo-2024-04-09",
+                "gpt-4-0125-preview",
+                "gpt-4-turbo-preview",
+                "gpt-4-1106-preview",
+                "gpt-4-vision-preview",
+                "gpt-4",
+                "gpt-4-0314",
+                "gpt-4-0613",
+                "gpt-4-32k",
+                "gpt-4-32k-0314",
+                "gpt-4-32k-0613",
+                "gpt-3.5-turbo",
+                "gpt-3.5-turbo-16k",
+                "gpt-3.5-turbo-0301",
+                "gpt-3.5-turbo-0613",
+                "gpt-3.5-turbo-1106",
+                "gpt-3.5-turbo-0125",
+                "gpt-3.5-turbo-16k-0613",
+                "o1-pro",
+                "o1-pro-2025-03-19",
+                "computer-use-preview",
+                "computer-use-preview-2025-03-11",
+            ],
+            str,
+        ]
+    ]
+    """Model ID used to generate the response, like `gpt-4o` or `o1`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the [model guide](/docs/models) to
+    browse and compare available models.
+    """
+
+    include: Optional[List[Includable]]
+    """Specify additional output data to include in the model response.
+
+    Currently supported values are:
+
+    - `file_search_call.results`: Include the search results of the file search
+      tool call.
+    - `message.input_image.image_url`: Include image urls from the input message.
+    - `computer_call_output.output.image_url`: Include image urls from the computer
+      call output.
+    """
+
+    instructions: Optional[str]
+    """
+    Inserts a system (or developer) message as the first item in the model's
+    context.
+
+    When used along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple
+    to swap out system (or developer) messages in new responses.
+    """
+
+    max_output_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a response,
+    including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Dict[str, str]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    parallel_tool_calls: Optional[bool]
+    """Whether to allow the model to run tool calls in parallel."""
+
+    previous_response_id: Optional[str]
+    """The unique ID of the previous response to the model.
+
+    Use this to create multi-turn conversations. Learn more about
+    [conversation state](/docs/guides/conversation-state).
+ """ + + reasoning: Optional[Reasoning] + """**o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + + store: Optional[bool] + """Whether to store the generated model response for later retrieval via API.""" + + stream: Optional[bool] + """ + If set to true, the model response data will be streamed to the client as it is + generated using + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + See the [Streaming section below](/docs/api-reference/responses-streaming) for + more information. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + text: Text + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](/docs/guides/text) + - [Structured Outputs](/docs/guides/structured-outputs) + """ + + tool_choice: ToolChoice + """ + How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + """ + + tools: Iterable[Tool] + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like [web search](/docs/guides/tools-web-search) or + [file search](/docs/guides/tools-file-search). Learn more about + [built-in tools](/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](/docs/guides/function-calling). + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + truncation: Optional[Literal["auto", "disabled"]] + """The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + """ + + +class InputInputItemListMessage(TypedDict, total=False): + content: Required[Union[str, Iterable[InputContentParam]]] + """ + Text, image, or audio input to the model, used to generate a response. Can also + contain previous assistant responses. + """ + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. 
One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
+
+
+class InputInputItemListItemReference(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the item to reference."""
+
+    type: Required[Literal["item_reference"]]
+    """The type of item to reference. Always `item_reference`."""
+
+
+InputInputItemList: TypeAlias = Union[
+    InputInputItemListMessage,
+    InputMessageParam,
+    OutputMessageParam,
+    FileSearchToolCallParam,
+    ComputerToolCallParam,
+    ComputerToolCallOutputParam,
+    WebSearchToolCallParam,
+    FunctionToolCallParam,
+    FunctionToolCallOutputParam,
+    ReasoningItemParam,
+    InputInputItemListItemReference,
+]
+
+
+class Reasoning(TypedDict, total=False):
+    effort: Optional[ReasoningEffort]
+    """**o-series models only**
+
+    Constrains effort on reasoning for
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+    result in faster responses and fewer tokens used on reasoning in a response.
+    """
+
+    generate_summary: Optional[Literal["concise", "detailed"]]
+    """**computer_use_preview only**
+
+    A summary of the reasoning performed by the model. This can be useful for
+    debugging and understanding the model's reasoning process. One of `concise` or
+    `detailed`.
+    """
+
+
+class TextFormatTextResponseFormatJsonSchema(TypedDict, total=False):
+    schema: Required[Dict[str, object]]
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+
+    type: Required[Literal["json_schema"]]
+    """The type of response format being defined. Always `json_schema`."""
+
+    description: str
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    strict: Optional[bool]
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field. Only a subset of JSON Schema is supported when `strict` is `true`. To
+    learn more, read the
+    [Structured Outputs guide](/docs/guides/structured-outputs).
+    """
+
+
+TextFormat: TypeAlias = Union[
+    ResponseFormatTextParam, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObjectParam
+]
+
+
+class Text(TypedDict, total=False):
+    format: TextFormat
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
+
+
+class ToolChoiceToolChoiceTypes(TypedDict, total=False):
+    type: Required[
+        Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    ]
+    """The type of hosted tool the model should use.
+ + Learn more about [built-in tools](/docs/guides/tools). + + Allowed values are: + + - `file_search` + - `web_search_preview` + - `computer_use_preview` + """ + + +class ToolChoiceToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + type: Required[Literal["function"]] + """For function calling, the type is always `function`.""" + + +ToolChoice: TypeAlias = Union[ + Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction +] + +ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam] + + +class ToolFileSearchToolRankingOptions(TypedDict, total=False): + ranker: Literal["auto", "default-2024-11-15"] + """The ranker to use for the file search.""" + + score_threshold: float + """ + The score threshold for the file search, a number between 0 and 1. Numbers + closer to 1 will attempt to return only the most relevant results, but may + return fewer results. + """ + + +class ToolFileSearchTool(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of the file search tool. Always `file_search`.""" + + vector_store_ids: Required[List[str]] + """The IDs of the vector stores to search.""" + + filters: ToolFileSearchToolFilters + """A filter to apply based on file attributes.""" + + max_num_results: int + """The maximum number of results to return. + + This number should be between 1 and 50 inclusive. + """ + + ranking_options: ToolFileSearchToolRankingOptions + """Ranking options for search.""" + + +class ToolFunctionTool(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + parameters: Required[Dict[str, object]] + """A JSON schema object describing the parameters of the function.""" + + strict: Required[bool] + """Whether to enforce strict parameter validation. Default `true`.""" + + type: Required[Literal["function"]] + """The type of the function tool. Always `function`.""" + + description: Optional[str] + """A description of the function. + + Used by the model to determine whether or not to call the function. + """ + + +class ToolComputerTool(TypedDict, total=False): + display_height: Required[float] + """The height of the computer display.""" + + display_width: Required[float] + """The width of the computer display.""" + + environment: Required[Literal["mac", "windows", "ubuntu", "browser"]] + """The type of computer environment to control.""" + + type: Required[Literal["computer_use_preview"]] + """The type of the computer use tool. Always `computer_use_preview`.""" + + +class ToolWebSearchToolUserLocation(WebSearchLocationParam, total=False): + type: Required[Literal["approximate"]] + """The type of location approximation. Always `approximate`.""" + + +class ToolWebSearchTool(TypedDict, total=False): + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] + """The type of the web search tool. One of: + + - `web_search_preview` + - `web_search_preview_2025_03_11` + """ + + search_context_size: WebSearchContextSize + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. 
+ """ + + user_location: Optional[ToolWebSearchToolUserLocation] + """Approximate location parameters for the search.""" + + +Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool] diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py b/src/digitalocean_genai_sdk/types/response_list_input_items_params.py new file mode 100644 index 00000000..cba0c8b8 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response_list_input_items_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ResponseListInputItemsParams"] + + +class ResponseListInputItemsParams(TypedDict, total=False): + after: str + """An item ID to list items after, used in pagination.""" + + before: str + """An item ID to list items before, used in pagination.""" + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """The order to return the input items in. Default is `asc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + """ diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py b/src/digitalocean_genai_sdk/types/response_list_input_items_response.py new file mode 100644 index 00000000..95f4555e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response_list_input_items_response.py @@ -0,0 +1,76 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from .._utils import PropertyInfo +from .._models import BaseModel +from .input_message import InputMessage +from .output_message import OutputMessage +from .computer_tool_call import ComputerToolCall +from .function_tool_call import FunctionToolCall +from .web_search_tool_call import WebSearchToolCall +from .file_search_tool_call import FileSearchToolCall +from .computer_tool_call_output import ComputerToolCallOutput +from .function_tool_call_output import FunctionToolCallOutput + +__all__ = [ + "ResponseListInputItemsResponse", + "Data", + "DataMessage", + "DataComputerCallOutput", + "DataFunctionCall", + "DataFunctionCallOutput", +] + + +class DataMessage(InputMessage): + id: str + """The unique ID of the message input.""" + + +class DataComputerCallOutput(ComputerToolCallOutput): + id: str # type: ignore + """The unique ID of the computer call tool output.""" + + +class DataFunctionCall(FunctionToolCall): + id: str # type: ignore + """The unique ID of the function tool call.""" + + +class DataFunctionCallOutput(FunctionToolCallOutput): + id: str # type: ignore + """The unique ID of the function call tool output.""" + + +Data: TypeAlias = Annotated[ + Union[ + DataMessage, + OutputMessage, + FileSearchToolCall, + ComputerToolCall, + DataComputerCallOutput, + WebSearchToolCall, + DataFunctionCall, + DataFunctionCallOutput, + ], + PropertyInfo(discriminator="type"), +] + + +class ResponseListInputItemsResponse(BaseModel): + data: List[Data] + """A list of items used to generate this response.""" + + first_id: str + """The ID of the first item in the list.""" + + has_more: bool + """Whether there are more items available.""" + + last_id: str + """The ID of the last item in the list.""" + + object: 
Literal["list"] + """The type of object returned, must be `list`.""" diff --git a/src/digitalocean_genai_sdk/types/response_properties.py b/src/digitalocean_genai_sdk/types/response_properties.py new file mode 100644 index 00000000..84746be5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response_properties.py @@ -0,0 +1,362 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .compound_filter import CompoundFilter +from .reasoning_effort import ReasoningEffort +from .comparison_filter import ComparisonFilter +from .chat.web_search_location import WebSearchLocation +from .chat.response_format_text import ResponseFormatText +from .chat.web_search_context_size import WebSearchContextSize +from .chat.response_format_json_object import ResponseFormatJsonObject + +__all__ = [ + "ResponseProperties", + "Reasoning", + "Text", + "TextFormat", + "TextFormatTextResponseFormatJsonSchema", + "ToolChoice", + "ToolChoiceToolChoiceTypes", + "ToolChoiceToolChoiceFunction", + "Tool", + "ToolFileSearchTool", + "ToolFileSearchToolFilters", + "ToolFileSearchToolRankingOptions", + "ToolFunctionTool", + "ToolComputerTool", + "ToolWebSearchTool", + "ToolWebSearchToolUserLocation", +] + + +class Reasoning(BaseModel): + effort: Optional[ReasoningEffort] = None + """**o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + + generate_summary: Optional[Literal["concise", "detailed"]] = None + """**computer_use_preview only** + + A summary of the reasoning performed by the model. This can be useful for + debugging and understanding the model's reasoning process. One of `concise` or + `detailed`. + """ + + +class TextFormatTextResponseFormatJsonSchema(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + type: Literal["json_schema"] + """The type of response format being defined. Always `json_schema`.""" + + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + name: Optional[str] = None + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + strict: Optional[bool] = None + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. Only a subset of JSON Schema is supported when `strict` is `true`. To + learn more, read the + [Structured Outputs guide](/docs/guides/structured-outputs). + """ + + +TextFormat: TypeAlias = Union[ResponseFormatText, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObject] + + +class Text(BaseModel): + format: Optional[TextFormat] = None + """An object specifying the format that the model must output. 
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
+
+
+class ToolChoiceToolChoiceTypes(BaseModel):
+    type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    """The type of hosted tool the model should use.
+
+    Learn more about [built-in tools](/docs/guides/tools).
+
+    Allowed values are:
+
+    - `file_search`
+    - `web_search_preview`
+    - `computer_use_preview`
+    """
+
+
+class ToolChoiceToolChoiceFunction(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    type: Literal["function"]
+    """For function calling, the type is always `function`."""
+
+
+ToolChoice: TypeAlias = Union[
+    Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction
+]
+
+ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+
+
+class ToolFileSearchToolRankingOptions(BaseModel):
+    ranker: Optional[Literal["auto", "default-2024-11-15"]] = None
+    """The ranker to use for the file search."""
+
+    score_threshold: Optional[float] = None
+    """
+    The score threshold for the file search, a number between 0 and 1. Numbers
+    closer to 1 will attempt to return only the most relevant results, but may
+    return fewer results.
+    """
+
+
+class ToolFileSearchTool(BaseModel):
+    type: Literal["file_search"]
+    """The type of the file search tool. Always `file_search`."""
+
+    vector_store_ids: List[str]
+    """The IDs of the vector stores to search."""
+
+    filters: Optional[ToolFileSearchToolFilters] = None
+    """A filter to apply based on file attributes."""
+
+    max_num_results: Optional[int] = None
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: Optional[ToolFileSearchToolRankingOptions] = None
+    """Ranking options for search."""
+
+
+class ToolFunctionTool(BaseModel):
+    name: str
+    """The name of the function to call."""
+
+    parameters: Dict[str, object]
+    """A JSON schema object describing the parameters of the function."""
+
+    strict: bool
+    """Whether to enforce strict parameter validation. Default `true`."""
+
+    type: Literal["function"]
+    """The type of the function tool. Always `function`."""
+
+    description: Optional[str] = None
+    """A description of the function.
+
+    Used by the model to determine whether or not to call the function.
+    """
+
+
+class ToolComputerTool(BaseModel):
+    display_height: float
+    """The height of the computer display."""
+
+    display_width: float
+    """The width of the computer display."""
+
+    environment: Literal["mac", "windows", "ubuntu", "browser"]
+    """The type of computer environment to control."""
+
+    type: Literal["computer_use_preview"]
+    """The type of the computer use tool. Always `computer_use_preview`."""
+
+
+class ToolWebSearchToolUserLocation(WebSearchLocation):
+    type: Literal["approximate"]
+    """The type of location approximation. Always `approximate`."""
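Editorial aside (not part of the generated patch): the `ToolChoice` alias defined above accepts three shapes. A sketch with hypothetical values (`get_weather` is an illustrative function name, not part of the SDK):

from digitalocean_genai_sdk.types.response_properties import (
    ToolChoice,
    ToolChoiceToolChoiceFunction,
    ToolChoiceToolChoiceTypes,
)

# A bare mode string, a hosted-tool selector, or a forced function call all
# satisfy the union.
mode: ToolChoice = "auto"
hosted: ToolChoice = ToolChoiceToolChoiceTypes(type="file_search")
forced: ToolChoice = ToolChoiceToolChoiceFunction(name="get_weather", type="function")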
Always `approximate`.""" + + +class ToolWebSearchTool(BaseModel): + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] + """The type of the web search tool. One of: + + - `web_search_preview` + - `web_search_preview_2025_03_11` + """ + + search_context_size: Optional[WebSearchContextSize] = None + """ + High level guidance for the amount of context window space to use for the + search. One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[ToolWebSearchToolUserLocation] = None + """Approximate location parameters for the search.""" + + +Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool] + + +class ResponseProperties(BaseModel): + instructions: Optional[str] = None + """ + Inserts a system (or developer) message as the first item in the model's + context. + + When used along with `previous_response_id`, the instructions from a previous + response will not be carried over to the next response. This makes it simple + to swap out system (or developer) messages in new responses. + """ + + max_output_tokens: Optional[int] = None + """ + An upper bound for the number of tokens that can be generated for a response, + including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + """ + + model: Union[ + Literal[ + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "computer-use-preview", + "computer-use-preview-2025-03-11", + ], + str, + None, + ] = None + """Model ID used to generate the response, like `gpt-4o` or `o1`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the [model guide](/docs/models) to + browse and compare available models. + """ + + previous_response_id: Optional[str] = None + """The unique ID of the previous response to the model. + + Use this to create multi-turn conversations. Learn more about + [conversation state](/docs/guides/conversation-state). + """ + + reasoning: Optional[Reasoning] = None + """**o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + + text: Optional[Text] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data.
Learn more: + + - [Text inputs and outputs](/docs/guides/text) + - [Structured Outputs](/docs/guides/structured-outputs) + """ + + tool_choice: Optional[ToolChoice] = None + """ + How the model should select which tool (or tools) to use when generating a + response. See the `tools` parameter to see how to specify which tools the model + can call. + """ + + tools: Optional[List[Tool]] = None + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like [web search](/docs/guides/tools-web-search) or + [file search](/docs/guides/tools-file-search). Learn more about + [built-in tools](/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](/docs/guides/function-calling). + """ + + truncation: Optional[Literal["auto", "disabled"]] = None + """The truncation strategy to use for the model response. + + - `auto`: If the context of this response and previous ones exceeds the model's + context window size, the model will truncate the response to fit the context + window by dropping input items in the middle of the conversation. + - `disabled` (default): If a model response will exceed the context window size + for a model, the request will fail with a 400 error. + """ diff --git a/src/digitalocean_genai_sdk/types/response_retrieve_params.py b/src/digitalocean_genai_sdk/types/response_retrieve_params.py new file mode 100644 index 00000000..b85dbba1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/response_retrieve_params.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +from .includable import Includable + +__all__ = ["ResponseRetrieveParams"] + + +class ResponseRetrieveParams(TypedDict, total=False): + include: List[Includable] + """Specify additional output data to include in the response. + + Currently supported values are: + + - `file_search_call.results`: Include the search results of the + file search tool call. + - `message.input_image.image_url`: Include image URLs from the input message. + - `computer_call_output.output.image_url`: Include image URLs from the computer + call output. + """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy.py new file mode 100644 index 00000000..a4c0ce82 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/static_chunking_strategy.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["StaticChunkingStrategy"] + + +class StaticChunkingStrategy(BaseModel): + chunk_overlap_tokens: int + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: int + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`.
+ """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py new file mode 100644 index 00000000..c3535404 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["StaticChunkingStrategyParam"] + + +class StaticChunkingStrategyParam(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py new file mode 100644 index 00000000..51de3b75 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .static_chunking_strategy_param import StaticChunkingStrategyParam + +__all__ = ["StaticChunkingStrategyRequestParam"] + + +class StaticChunkingStrategyRequestParam(TypedDict, total=False): + static: Required[StaticChunkingStrategyParam] + + type: Required[Literal["static"]] + """Always `static`.""" diff --git a/src/digitalocean_genai_sdk/types/stop_configuration_param.py b/src/digitalocean_genai_sdk/types/stop_configuration_param.py new file mode 100644 index 00000000..d3093c7c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/stop_configuration_param.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import TypeAlias + +__all__ = ["StopConfigurationParam"] + +StopConfigurationParam: TypeAlias = Union[Optional[str], List[str]] diff --git a/src/digitalocean_genai_sdk/types/thread_create_params.py b/src/digitalocean_genai_sdk/types/thread_create_params.py new file mode 100644 index 00000000..7ee77039 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/thread_create_params.py @@ -0,0 +1,130 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .threads.create_message_request_param import CreateMessageRequestParam + +__all__ = [ + "ThreadCreateParams", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "ToolResourcesFileSearchVectorStore", + "ToolResourcesFileSearchVectorStoreChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", + "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", +] + + +class ThreadCreateParams(TypedDict, total=False): + messages: Iterable[CreateMessageRequestParam] + """A list of [messages](/docs/api-reference/messages) to start the thread with.""" + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](/docs/api-reference/files) IDs made available to the + `code_interpreter` tool. There can be a maximum of 20 files associated with the + tool. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ + + +class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): + static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] + + type: Required[Literal["static"]] + """Always `static`.""" + + +ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ + ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, + ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, +] + + +class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. + """ + + file_ids: List[str] + """A list of [file](/docs/api-reference/files) IDs to add to the vector store. + + There can be a maximum of 10000 files in a vector store. 
+ """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The [vector store](/docs/api-reference/vector-stores/object) attached to this + thread. There can be a maximum of 1 vector store attached to the thread. + """ + + vector_stores: Iterable[ToolResourcesFileSearchVectorStore] + """ + A helper to create a [vector store](/docs/api-reference/vector-stores/object) + with file_ids and attach it to this thread. There can be a maximum of 1 vector + store attached to the thread. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/src/digitalocean_genai_sdk/types/thread_delete_response.py b/src/digitalocean_genai_sdk/types/thread_delete_response.py new file mode 100644 index 00000000..74f09d84 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/thread_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ThreadDeleteResponse"] + + +class ThreadDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["thread.deleted"] diff --git a/src/digitalocean_genai_sdk/types/thread_object.py b/src/digitalocean_genai_sdk/types/thread_object.py new file mode 100644 index 00000000..7924dd8f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/thread_object.py @@ -0,0 +1,60 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ThreadObject", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class ToolResourcesCodeInterpreter(BaseModel): + file_ids: Optional[List[str]] = None + """ + A list of [file](/docs/api-reference/files) IDs made available to the + `code_interpreter` tool. There can be a maximum of 20 files associated with the + tool. + """ + + +class ToolResourcesFileSearch(BaseModel): + vector_store_ids: Optional[List[str]] = None + """ + The [vector store](/docs/api-reference/vector-stores/object) attached to this + thread. There can be a maximum of 1 vector store attached to the thread. + """ + + +class ToolResources(BaseModel): + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None + + file_search: Optional[ToolResourcesFileSearch] = None + + +class ThreadObject(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the thread was created.""" + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+ """ + + object: Literal["thread"] + """The object type, which is always `thread`.""" + + tool_resources: Optional[ToolResources] = None + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ diff --git a/src/digitalocean_genai_sdk/types/thread_update_params.py b/src/digitalocean_genai_sdk/types/thread_update_params.py new file mode 100644 index 00000000..d952d35b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/thread_update_params.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Optional +from typing_extensions import TypedDict + +__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class ThreadUpdateParams(TypedDict, total=False): + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](/docs/api-reference/files) IDs made available to the + `code_interpreter` tool. There can be a maximum of 20 files associated with the + tool. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The [vector store](/docs/api-reference/vector-stores/object) attached to this + thread. There can be a maximum of 1 vector store attached to the thread. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/src/digitalocean_genai_sdk/types/threads/__init__.py b/src/digitalocean_genai_sdk/types/threads/__init__.py new file mode 100644 index 00000000..9af8d93a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/__init__.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .run_object import RunObject as RunObject +from .message_object import MessageObject as MessageObject +from .run_list_params import RunListParams as RunListParams +from .run_create_params import RunCreateParams as RunCreateParams +from .run_list_response import RunListResponse as RunListResponse +from .run_update_params import RunUpdateParams as RunUpdateParams +from .truncation_object import TruncationObject as TruncationObject +from .message_list_params import MessageListParams as MessageListParams +from .message_create_params import MessageCreateParams as MessageCreateParams +from .message_list_response import MessageListResponse as MessageListResponse +from .message_update_params import MessageUpdateParams as MessageUpdateParams +from .run_create_run_params import RunCreateRunParams as RunCreateRunParams +from .message_delete_response import MessageDeleteResponse as MessageDeleteResponse +from .truncation_object_param import TruncationObjectParam as TruncationObjectParam +from .create_message_request_param import CreateMessageRequestParam as CreateMessageRequestParam +from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams +from .message_content_image_url_object import MessageContentImageURLObject as MessageContentImageURLObject +from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption as AssistantsAPIToolChoiceOption +from .message_content_image_file_object import MessageContentImageFileObject as MessageContentImageFileObject +from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly as AssistantToolsFileSearchTypeOnly +from .message_content_image_url_object_param import ( + MessageContentImageURLObjectParam as MessageContentImageURLObjectParam, +) +from .assistants_api_tool_choice_option_param import ( + AssistantsAPIToolChoiceOptionParam as AssistantsAPIToolChoiceOptionParam, +) +from .message_content_image_file_object_param import ( + MessageContentImageFileObjectParam as MessageContentImageFileObjectParam, +) +from .assistant_tools_file_search_type_only_param import ( + AssistantToolsFileSearchTypeOnlyParam as AssistantToolsFileSearchTypeOnlyParam, +) diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py new file mode 100644 index 00000000..6708bff3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["AssistantToolsFileSearchTypeOnly"] + + +class AssistantToolsFileSearchTypeOnly(BaseModel): + type: Literal["file_search"] + """The type of tool being defined: `file_search`""" diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py new file mode 100644 index 00000000..f0a48b2c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
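+# Usage sketch: this TypedDict carries only its discriminator, so the whole
+# payload is a single-literal dict, e.g. when choosing which tools a message
+# attachment is added to:
+#
+#     tool: AssistantToolsFileSearchTypeOnlyParam = {"type": "file_search"}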
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AssistantToolsFileSearchTypeOnlyParam"] + + +class AssistantToolsFileSearchTypeOnlyParam(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py new file mode 100644 index 00000000..af7be1f7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel + +__all__ = ["AssistantsAPIToolChoiceOption", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"] + + +class AssistantsNamedToolChoiceFunction(BaseModel): + name: str + """The name of the function to call.""" + + +class AssistantsNamedToolChoice(BaseModel): + type: Literal["function", "code_interpreter", "file_search"] + """The type of the tool. If type is `function`, the function name must be set""" + + function: Optional[AssistantsNamedToolChoiceFunction] = None + + +AssistantsAPIToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice] diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py new file mode 100644 index 00000000..10f98f89 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["AssistantsAPIToolChoiceOptionParam", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"] + + +class AssistantsNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class AssistantsNamedToolChoice(TypedDict, total=False): + type: Required[Literal["function", "code_interpreter", "file_search"]] + """The type of the tool. If type is `function`, the function name must be set""" + + function: AssistantsNamedToolChoiceFunction + + +AssistantsAPIToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice] diff --git a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py b/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py new file mode 100644 index 00000000..64c2a781 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py @@ -0,0 +1,71 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
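+# Usage sketch (illustrative): a message param whose content mixes a text part
+# with an external image URL part, plus an attachment routed to the file_search
+# tool. The file ID and URL are hypothetical placeholders.
+#
+#     message: CreateMessageRequestParam = {
+#         "role": "user",
+#         "content": [
+#             {"type": "text", "text": "What does this chart show?"},
+#             {
+#                 "type": "image_url",
+#                 "image_url": {"url": "https://example.com/chart.png", "detail": "low"},
+#             },
+#         ],
+#         "attachments": [{"file_id": "file-abc123", "tools": [{"type": "file_search"}]}],
+#     }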
+ +from __future__ import annotations + +from typing import Dict, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..assistant_tools_code_param import AssistantToolsCodeParam +from .message_content_image_url_object_param import MessageContentImageURLObjectParam +from .message_content_image_file_object_param import MessageContentImageFileObjectParam +from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam + +__all__ = [ + "CreateMessageRequestParam", + "ContentArrayOfContentPart", + "ContentArrayOfContentPartMessageRequestContentTextObject", + "Attachment", + "AttachmentTool", +] + + +class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False): + text: Required[str] + """Text content to be sent to the model""" + + type: Required[Literal["text"]] + """Always `text`.""" + + +ContentArrayOfContentPart: TypeAlias = Union[ + MessageContentImageFileObjectParam, + MessageContentImageURLObjectParam, + ContentArrayOfContentPartMessageRequestContentTextObject, +] + +AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam] + + +class Attachment(TypedDict, total=False): + file_id: str + """The ID of the file to attach to the message.""" + + tools: Iterable[AttachmentTool] + """The tools to add this file to.""" + + +class CreateMessageRequestParam(TypedDict, total=False): + content: Required[Union[str, Iterable[ContentArrayOfContentPart]]] + """The text contents of the message.""" + + role: Required[Literal["user", "assistant"]] + """The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + """ + + attachments: Optional[Iterable[Attachment]] + """A list of files attached to the message, and the tools they should be added to.""" + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py new file mode 100644 index 00000000..b22ef410 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["MessageContentImageFileObject", "ImageFile"] + + +class ImageFile(BaseModel): + file_id: str + """The [File](/docs/api-reference/files) ID of the image in the message content. + + Set `purpose="vision"` when uploading the File if you need to later display the + file content. + """ + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image if specified by the user. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. 
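+# Note: this is the response-side counterpart of the `*_param` TypedDicts; the
+# SDK constructs these models when deserializing API output. A minimal sketch
+# of reading one, assuming `part` is an element of a message's content array:
+#
+#     if part.type == "image_file":
+#         print(part.image_file.file_id, part.image_file.detail)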
+ """ + + +class MessageContentImageFileObject(BaseModel): + image_file: ImageFile + + type: Literal["image_file"] + """Always `image_file`.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py new file mode 100644 index 00000000..734dcf15 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["MessageContentImageFileObjectParam", "ImageFile"] + + +class ImageFile(TypedDict, total=False): + file_id: Required[str] + """The [File](/docs/api-reference/files) ID of the image in the message content. + + Set `purpose="vision"` when uploading the File if you need to later display the + file content. + """ + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image if specified by the user. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. + """ + + +class MessageContentImageFileObjectParam(TypedDict, total=False): + image_file: Required[ImageFile] + + type: Required[Literal["image_file"]] + """Always `image_file`.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py new file mode 100644 index 00000000..9a7f980b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["MessageContentImageURLObject", "ImageURL"] + + +class ImageURL(BaseModel): + url: str + """ + The external URL of the image, must be a supported image types: jpeg, jpg, png, + gif, webp. + """ + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. Default + value is `auto` + """ + + +class MessageContentImageURLObject(BaseModel): + image_url: ImageURL + + type: Literal["image_url"] + """The type of the content part.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py new file mode 100644 index 00000000..f3f777c4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["MessageContentImageURLObjectParam", "ImageURL"] + + +class ImageURL(TypedDict, total=False): + url: Required[str] + """ + The external URL of the image, must be a supported image types: jpeg, jpg, png, + gif, webp. + """ + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. 
Default + value is `auto` + """ + + +class MessageContentImageURLObjectParam(TypedDict, total=False): + image_url: Required[ImageURL] + + type: Required[Literal["image_url"]] + """The type of the content part.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_create_params.py b/src/digitalocean_genai_sdk/types/threads/message_create_params.py new file mode 100644 index 00000000..d9a4cd40 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_create_params.py @@ -0,0 +1,71 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..assistant_tools_code_param import AssistantToolsCodeParam +from .message_content_image_url_object_param import MessageContentImageURLObjectParam +from .message_content_image_file_object_param import MessageContentImageFileObjectParam +from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam + +__all__ = [ + "MessageCreateParams", + "ContentArrayOfContentPart", + "ContentArrayOfContentPartMessageRequestContentTextObject", + "Attachment", + "AttachmentTool", +] + + +class MessageCreateParams(TypedDict, total=False): + content: Required[Union[str, Iterable[ContentArrayOfContentPart]]] + """The text contents of the message.""" + + role: Required[Literal["user", "assistant"]] + """The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + """ + + attachments: Optional[Iterable[Attachment]] + """A list of files attached to the message, and the tools they should be added to.""" + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False): + text: Required[str] + """Text content to be sent to the model""" + + type: Required[Literal["text"]] + """Always `text`.""" + + +ContentArrayOfContentPart: TypeAlias = Union[ + MessageContentImageFileObjectParam, + MessageContentImageURLObjectParam, + ContentArrayOfContentPartMessageRequestContentTextObject, +] + +AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam] + + +class Attachment(TypedDict, total=False): + file_id: str + """The ID of the file to attach to the message.""" + + tools: Iterable[AttachmentTool] + """The tools to add this file to.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py b/src/digitalocean_genai_sdk/types/threads/message_delete_response.py new file mode 100644 index 00000000..c86408dc --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["MessageDeleteResponse"] + + +class MessageDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["thread.message.deleted"] diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_params.py b/src/digitalocean_genai_sdk/types/threads/message_list_params.py new file mode 100644 index 00000000..a7c22a66 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["MessageListParams"] + + +class MessageListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ + + run_id: str + """Filter messages by the run ID that generated them.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_response.py b/src/digitalocean_genai_sdk/types/threads/message_list_response.py new file mode 100644 index 00000000..f710da32 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel +from .message_object import MessageObject + +__all__ = ["MessageListResponse"] + + +class MessageListResponse(BaseModel): + data: List[MessageObject] + + first_id: str + + has_more: bool + + last_id: str + + object: str diff --git a/src/digitalocean_genai_sdk/types/threads/message_object.py b/src/digitalocean_genai_sdk/types/threads/message_object.py new file mode 100644 index 00000000..b2cb3711 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_object.py @@ -0,0 +1,179 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
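+# Usage sketch (illustrative): a message's `content` array can interleave
+# text, image, and refusal parts, so narrowing on `type` before reading
+# part-specific fields is the safe pattern. Assumes `message` is a
+# deserialized MessageObject:
+#
+#     text_values = [part.text.value for part in message.content if part.type == "text"]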
+ +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from ..assistant_tools_code import AssistantToolsCode +from .message_content_image_url_object import MessageContentImageURLObject +from .message_content_image_file_object import MessageContentImageFileObject +from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly + +__all__ = [ + "MessageObject", + "Attachment", + "AttachmentTool", + "Content", + "ContentMessageContentTextObject", + "ContentMessageContentTextObjectText", + "ContentMessageContentTextObjectTextAnnotation", + "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject", + "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation", + "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject", + "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath", + "ContentMessageContentRefusalObject", + "IncompleteDetails", +] + +AttachmentTool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearchTypeOnly] + + +class Attachment(BaseModel): + file_id: Optional[str] = None + """The ID of the file to attach to the message.""" + + tools: Optional[List[AttachmentTool]] = None + """The tools to add this file to.""" + + +class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation( + BaseModel +): + file_id: str + """The ID of the specific File the citation is from.""" + + +class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject(BaseModel): + end_index: int + + file_citation: ( + ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation + ) + + start_index: int + + text: str + """The text in the message content that needs to be replaced.""" + + type: Literal["file_citation"] + """Always `file_citation`.""" + + +class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath(BaseModel): + file_id: str + """The ID of the file that was generated.""" + + +class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject(BaseModel): + end_index: int + + file_path: ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath + + start_index: int + + text: str + """The text in the message content that needs to be replaced.""" + + type: Literal["file_path"] + """Always `file_path`.""" + + +ContentMessageContentTextObjectTextAnnotation: TypeAlias = Union[ + ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject, + ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject, +] + + +class ContentMessageContentTextObjectText(BaseModel): + annotations: List[ContentMessageContentTextObjectTextAnnotation] + + value: str + """The data that makes up the text.""" + + +class ContentMessageContentTextObject(BaseModel): + text: ContentMessageContentTextObjectText + + type: Literal["text"] + """Always `text`.""" + + +class ContentMessageContentRefusalObject(BaseModel): + refusal: str + + type: Literal["refusal"] + """Always `refusal`.""" + + +Content: TypeAlias = Union[ + MessageContentImageFileObject, + MessageContentImageURLObject, + ContentMessageContentTextObject, + ContentMessageContentRefusalObject, +] + + +class 
IncompleteDetails(BaseModel): + reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] + """The reason the message is incomplete.""" + + +class MessageObject(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + assistant_id: Optional[str] = None + """ + If applicable, the ID of the [assistant](/docs/api-reference/assistants) that + authored this message. + """ + + attachments: Optional[List[Attachment]] = None + """A list of files attached to the message, and the tools they were added to.""" + + completed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the message was completed.""" + + content: List[Content] + """The content of the message as an array of text and/or images.""" + + created_at: int + """The Unix timestamp (in seconds) for when the message was created.""" + + incomplete_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the message was marked as incomplete.""" + + incomplete_details: Optional[IncompleteDetails] = None + """On an incomplete message, details about why the message is incomplete.""" + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + object: Literal["thread.message"] + """The object type, which is always `thread.message`.""" + + role: Literal["user", "assistant"] + """The entity that produced the message. One of `user` or `assistant`.""" + + run_id: Optional[str] = None + """ + The ID of the [run](/docs/api-reference/runs) associated with the creation of + this message. Value is `null` when messages are created manually using the + create message or create thread endpoints. + """ + + status: Literal["in_progress", "incomplete", "completed"] + """ + The status of the message, which can be either `in_progress`, `incomplete`, or + `completed`. + """ + + thread_id: str + """The [thread](/docs/api-reference/threads) ID that this message belongs to.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_update_params.py b/src/digitalocean_genai_sdk/types/threads/message_update_params.py new file mode 100644 index 00000000..a2e25260 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/message_update_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["MessageUpdateParams"] + + +class MessageUpdateParams(TypedDict, total=False): + thread_id: Required[str] + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters.
+ """ diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_params.py new file mode 100644 index 00000000..43d0611a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/run_create_params.py @@ -0,0 +1,215 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .truncation_object_param import TruncationObjectParam +from ..assistant_tools_code_param import AssistantToolsCodeParam +from ..create_thread_request_param import CreateThreadRequestParam +from ..assistant_tools_function_param import AssistantToolsFunctionParam +from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam +from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam +from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam + +__all__ = ["RunCreateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"] + + +class RunCreateParams(TypedDict, total=False): + assistant_id: Required[str] + """ + The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + """ + + instructions: Optional[str] + """Override the default system message of the assistant. + + This is useful for modifying the behavior on a per-run basis. + """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + max_prompt_tokens: Optional[int] + """The maximum number of prompt tokens that may be used over the course of the run. + + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: Union[ + str, + Literal[ + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4.5-preview", + "gpt-4.5-preview-2025-02-27", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ], + None, + ] + """The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. + + If a value is provided here, it will override the model associated with the + assistant. 
If not, the model associated with the assistant will be used. + """ + + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + """ + + response_format: Optional[AssistantsAPIResponseFormatOptionParam] + """Specifies the format that the model must output. + + Compatible with [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + stream: Optional[bool] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + """ + + thread: CreateThreadRequestParam + """Options to create a new thread. + + If no thread is provided when running a request, an empty thread will be + created. + """ + + tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + """ + + tool_resources: Optional[ToolResources] + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + tools: Optional[Iterable[Tool]] + """Override the tools the assistant can use for this run. + + This is useful for modifying the behavior on a per-run basis. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
+ """ + + truncation_strategy: Optional[TruncationObjectParam] + """Controls for how a thread will be truncated prior to the run. + + Use this to control the intial context window of the run. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](/docs/api-reference/files) IDs made available to the + `code_interpreter` tool. There can be a maximum of 20 files associated with the + tool. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached + to this assistant. There can be a maximum of 1 vector store attached to the + assistant. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch + + +Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py new file mode 100644 index 00000000..694c7eea --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py @@ -0,0 +1,178 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..reasoning_effort import ReasoningEffort +from .truncation_object_param import TruncationObjectParam +from ..assistant_supported_models import AssistantSupportedModels +from ..assistant_tools_code_param import AssistantToolsCodeParam +from .create_message_request_param import CreateMessageRequestParam +from ..assistant_tools_function_param import AssistantToolsFunctionParam +from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam +from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam +from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam + +__all__ = ["RunCreateRunParams", "Tool"] + + +class RunCreateRunParams(TypedDict, total=False): + assistant_id: Required[str] + """ + The ID of the [assistant](/docs/api-reference/assistants) to use to execute this + run. + """ + + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + additional_instructions: Optional[str] + """Appends additional instructions at the end of the instructions for the run. + + This is useful for modifying the behavior on a per-run basis without overriding + other instructions. + """ + + additional_messages: Optional[Iterable[CreateMessageRequestParam]] + """Adds additional messages to the thread before creating the run.""" + + instructions: Optional[str] + """ + Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of + the assistant. This is useful for modifying the behavior on a per-run basis. 
+ """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + max_prompt_tokens: Optional[int] + """The maximum number of prompt tokens that may be used over the course of the run. + + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: Union[str, AssistantSupportedModels, None] + """The ID of the [Model](/docs/api-reference/models) to be used to execute this + run. + + If a value is provided here, it will override the model associated with the + assistant. If not, the model associated with the assistant will be used. + """ + + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + """ + + reasoning_effort: Optional[ReasoningEffort] + """**o-series models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + + response_format: Optional[AssistantsAPIResponseFormatOptionParam] + """Specifies the format that the model must output. + + Compatible with [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + stream: Optional[bool] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. 
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
+    tool_choice: Optional[AssistantsAPIToolChoiceOptionParam]
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tools and instead generates a message. `auto` is the default value
+    and means the model can pick between generating a message or calling one or more
+    tools. `required` means the model must call one or more tools before responding
+    to the user. Specifying a particular tool like `{"type": "file_search"}` or
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+    """
+
+    tools: Optional[Iterable[Tool]]
+    """Override the tools the assistant can use for this run.
+
+    This is useful for modifying the behavior on a per-run basis.
+    """
+
+    top_p: Optional[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or temperature but not both.
+    """
+
+    truncation_strategy: Optional[TruncationObjectParam]
+    """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+    """
+
+
+Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam]
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_params.py b/src/digitalocean_genai_sdk/types/threads/run_list_params.py
new file mode 100644
index 00000000..fbea54f6
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/threads/run_list_params.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["RunListParams"]
+
+
+class RunListParams(TypedDict, total=False):
+    after: str
+    """A cursor for use in pagination.
+
+    `after` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, ending with obj_foo, your
+    subsequent call can include after=obj_foo in order to fetch the next page of the
+    list.
+    """
+
+    before: str
+    """A cursor for use in pagination.
+
+    `before` is an object ID that defines your place in the list. For instance, if
+    you make a list request and receive 100 objects, starting with obj_foo, your
+    subsequent call can include before=obj_foo in order to fetch the previous page
+    of the list.
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """Sort order by the `created_at` timestamp of the objects.
+
+    `asc` for ascending order and `desc` for descending order.
+    """
diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_response.py b/src/digitalocean_genai_sdk/types/threads/run_list_response.py
new file mode 100644
index 00000000..899bd0f9
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/threads/run_list_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
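+#
+# A minimal pagination sketch (illustrative only): the client class name and the
+# `threads.runs.list` method are assumptions based on this SDK's resource
+# layout, not guarantees of this file.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed
+#
+#     client = DigitaloceanGenaiSDK()
+#     page = client.threads.runs.list(thread_id="thread_abc123", limit=20, order="desc")
+#     runs = list(page.data)
+#     while page.has_more:
+#         page = client.threads.runs.list(thread_id="thread_abc123", after=page.last_id)
+#         runs.extend(page.data)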
+
+from typing import List
+
+from ..._models import BaseModel
+from .run_object import RunObject
+
+__all__ = ["RunListResponse"]
+
+
+class RunListResponse(BaseModel):
+    data: List[RunObject]
+
+    first_id: str
+
+    has_more: bool
+
+    last_id: str
+
+    object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/run_object.py b/src/digitalocean_genai_sdk/types/threads/run_object.py
new file mode 100644
index 00000000..fa89f4b4
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/threads/run_object.py
@@ -0,0 +1,265 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from .truncation_object import TruncationObject
+from ..assistant_tools_code import AssistantToolsCode
+from ..assistant_tools_function import AssistantToolsFunction
+from ..assistant_tools_file_search import AssistantToolsFileSearch
+from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption
+from ..assistants_api_response_format_option import AssistantsAPIResponseFormatOption
+
+__all__ = [
+    "RunObject",
+    "IncompleteDetails",
+    "LastError",
+    "RequiredAction",
+    "RequiredActionSubmitToolOutputs",
+    "RequiredActionSubmitToolOutputsToolCall",
+    "RequiredActionSubmitToolOutputsToolCallFunction",
+    "Tool",
+    "Usage",
+]
+
+
+class IncompleteDetails(BaseModel):
+    reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
+    """The reason why the run is incomplete.
+
+    This will point to which specific token limit was reached over the course of the
+    run.
+    """
+
+
+class LastError(BaseModel):
+    code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
+    """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+class RequiredActionSubmitToolOutputsToolCallFunction(BaseModel):
+    arguments: str
+    """The arguments that the model expects you to pass to the function."""
+
+    name: str
+    """The name of the function."""
+
+
+class RequiredActionSubmitToolOutputsToolCall(BaseModel):
+    id: str
+    """The ID of the tool call.
+
+    This ID must be referenced when you submit the tool outputs using the
+    [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs)
+    endpoint.
+    """
+
+    function: RequiredActionSubmitToolOutputsToolCallFunction
+    """The function definition."""
+
+    type: Literal["function"]
+    """The type of tool call the output is required for.
+
+    For now, this is always `function`.
+ """ + + +class RequiredActionSubmitToolOutputs(BaseModel): + tool_calls: List[RequiredActionSubmitToolOutputsToolCall] + """A list of the relevant tool calls.""" + + +class RequiredAction(BaseModel): + submit_tool_outputs: RequiredActionSubmitToolOutputs + """Details on the tool outputs needed for this run to continue.""" + + type: Literal["submit_tool_outputs"] + """For now, this is always `submit_tool_outputs`.""" + + +Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction] + + +class Usage(BaseModel): + completion_tokens: int + """Number of completion tokens used over the course of the run.""" + + prompt_tokens: int + """Number of prompt tokens used over the course of the run.""" + + total_tokens: int + """Total number of tokens used (prompt + completion).""" + + +class RunObject(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + assistant_id: str + """ + The ID of the [assistant](/docs/api-reference/assistants) used for execution of + this run. + """ + + cancelled_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run was cancelled.""" + + completed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run was completed.""" + + created_at: int + """The Unix timestamp (in seconds) for when the run was created.""" + + expires_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run will expire.""" + + failed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run failed.""" + + incomplete_details: Optional[IncompleteDetails] = None + """Details on why the run is incomplete. + + Will be `null` if the run is not incomplete. + """ + + instructions: str + """ + The instructions that the [assistant](/docs/api-reference/assistants) used for + this run. + """ + + last_error: Optional[LastError] = None + """The last error associated with this run. Will be `null` if there are no errors.""" + + max_completion_tokens: Optional[int] = None + """ + The maximum number of completion tokens specified to have been used over the + course of the run. + """ + + max_prompt_tokens: Optional[int] = None + """ + The maximum number of prompt tokens specified to have been used over the course + of the run. + """ + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: str + """ + The model that the [assistant](/docs/api-reference/assistants) used for this + run. + """ + + object: Literal["thread.run"] + """The object type, which is always `thread.run`.""" + + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + """ + + required_action: Optional[RequiredAction] = None + """Details on the action required to continue the run. + + Will be `null` if no action is required. + """ + + response_format: Optional[AssistantsAPIResponseFormatOption] = None + """Specifies the format that the model must output. + + Compatible with [GPT-4o](/docs/models#gpt-4o), + [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. 
+
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which ensures the model will match your supplied JSON schema. Learn more
+    in the [Structured Outputs guide](/docs/guides/structured-outputs).
+
+    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in a long-running and seemingly "stuck" request. Also note that
+    the message content may be partially cut off if `finish_reason="length"`, which
+    indicates the generation exceeded `max_tokens` or the conversation exceeded the
+    max context length.
+    """
+
+    started_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the run was started."""
+
+    status: Literal[
+        "queued",
+        "in_progress",
+        "requires_action",
+        "cancelling",
+        "cancelled",
+        "failed",
+        "completed",
+        "incomplete",
+        "expired",
+    ]
+    """
+    The status of the run, which can be either `queued`, `in_progress`,
+    `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
+    `incomplete`, or `expired`.
+    """
+
+    thread_id: str
+    """
+    The ID of the [thread](/docs/api-reference/threads) that was executed on as a
+    part of this run.
+    """
+
+    tool_choice: Optional[AssistantsAPIToolChoiceOption] = None
+    """
+    Controls which (if any) tool is called by the model. `none` means the model will
+    not call any tools and instead generates a message. `auto` is the default value
+    and means the model can pick between generating a message or calling one or more
+    tools. `required` means the model must call one or more tools before responding
+    to the user. Specifying a particular tool like `{"type": "file_search"}` or
+    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+    call that tool.
+    """
+
+    tools: List[Tool]
+    """
+    The list of tools that the [assistant](/docs/api-reference/assistants) used for
+    this run.
+    """
+
+    truncation_strategy: Optional[TruncationObject] = None
+    """Controls for how a thread will be truncated prior to the run.
+
+    Use this to control the initial context window of the run.
+    """
+
+    usage: Optional[Usage] = None
+    """Usage statistics related to the run.
+
+    This value will be `null` if the run is not in a terminal state (e.g. while it
+    is still `in_progress` or `queued`).
+    """
+
+    temperature: Optional[float] = None
+    """The sampling temperature used for this run. If not set, defaults to 1."""
+
+    top_p: Optional[float] = None
+    """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py b/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
new file mode 100644
index 00000000..77ab84ba
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
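+#
+# A minimal sketch (illustrative only) of answering a run in the
+# `requires_action` state; the `client.threads.runs.retrieve` and
+# `submit_tool_outputs` method names are assumptions based on this SDK's
+# resource layout.
+#
+#     run = client.threads.runs.retrieve(thread_id="thread_abc123", run_id="run_abc123")
+#     if run.status == "requires_action":
+#         calls = run.required_action.submit_tool_outputs.tool_calls
+#         client.threads.runs.submit_tool_outputs(
+#             run_id=run.id,
+#             thread_id="thread_abc123",
+#             tool_outputs=[{"tool_call_id": c.id, "output": "42"} for c in calls],
+#         )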
+ +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["RunSubmitToolOutputsParams", "ToolOutput"] + + +class RunSubmitToolOutputsParams(TypedDict, total=False): + thread_id: Required[str] + + tool_outputs: Required[Iterable[ToolOutput]] + """A list of tools for which the outputs are being submitted.""" + + stream: Optional[bool] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + +class ToolOutput(TypedDict, total=False): + output: str + """The output of the tool call to be submitted to continue the run.""" + + tool_call_id: str + """ + The ID of the tool call in the `required_action` object within the run object + the output is being submitted for. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/run_update_params.py b/src/digitalocean_genai_sdk/types/threads/run_update_params.py new file mode 100644 index 00000000..7b84a9b5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/run_update_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["RunUpdateParams"] + + +class RunUpdateParams(TypedDict, total=False): + thread_id: Required[str] + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py new file mode 100644 index 00000000..3cab1f9c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .run_step_object import RunStepObject as RunStepObject +from .step_list_params import StepListParams as StepListParams +from .step_list_response import StepListResponse as StepListResponse +from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams diff --git a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py b/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py new file mode 100644 index 00000000..3ede68fa --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py @@ -0,0 +1,323 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
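+#
+# A minimal sketch (illustrative only) of inspecting run steps; the
+# `client.threads.runs.steps.list` method name is an assumption based on this
+# SDK's resource layout.
+#
+#     steps = client.threads.runs.steps.list(thread_id="thread_abc123", run_id="run_abc123")
+#     for step in steps.data:
+#         if step.type == "message_creation":
+#             print("created message:", step.step_details.message_creation.message_id)
+#         elif step.type == "tool_calls":
+#             print("tool calls:", len(step.step_details.tool_calls))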
+ +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ...._models import BaseModel +from ...file_search_ranker import FileSearchRanker + +__all__ = [ + "RunStepObject", + "LastError", + "StepDetails", + "StepDetailsRunStepDetailsMessageCreationObject", + "StepDetailsRunStepDetailsMessageCreationObjectMessageCreation", + "StepDetailsRunStepDetailsToolCallsObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCall", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject", + "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction", + "Usage", +] + + +class LastError(BaseModel): + code: Literal["server_error", "rate_limit_exceeded"] + """One of `server_error` or `rate_limit_exceeded`.""" + + message: str + """A human-readable description of the error.""" + + +class StepDetailsRunStepDetailsMessageCreationObjectMessageCreation(BaseModel): + message_id: str + """The ID of the message that was created by this run step.""" + + +class StepDetailsRunStepDetailsMessageCreationObject(BaseModel): + message_creation: StepDetailsRunStepDetailsMessageCreationObjectMessageCreation + + type: Literal["message_creation"] + """Always `message_creation`.""" + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject( + BaseModel +): + logs: str + """The text output from the Code Interpreter tool call.""" + + type: Literal["logs"] + """Always `logs`.""" + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage( + BaseModel +): + file_id: str + """The [file](/docs/api-reference/files) ID of the image.""" + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject( + BaseModel +): + image: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage + + type: Literal["image"] + """Always `image`.""" + + 
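+# The union below discriminates Code Interpreter outputs on their `type` field.
+# A consumption sketch (the `call` variable is hypothetical, standing in for a
+# parsed `code_interpreter` tool call from this module):
+#
+#     for output in call.code_interpreter.outputs:
+#         if output.type == "logs":
+#             print(output.logs)
+#         elif output.type == "image":
+#             print("image file:", output.image.file_id)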
+StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput: TypeAlias = Union[
+    StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject,
+    StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject,
+]
+
+
+class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter(BaseModel):
+    input: str
+    """The input to the Code Interpreter tool call."""
+
+    outputs: List[
+        StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput
+    ]
+    """The outputs from the Code Interpreter tool call.
+
+    Code Interpreter can output one or more items, including text (`logs`) or images
+    (`image`). Each of these is represented by a different object type.
+    """
+
+
+class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    code_interpreter: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter
+    """The Code Interpreter tool call definition."""
+
+    type: Literal["code_interpreter"]
+    """The type of tool call.
+
+    This is always going to be `code_interpreter` for this type of tool call.
+    """
+
+
+class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions(
+    BaseModel
+):
+    ranker: FileSearchRanker
+    """The ranker to use for the file search.
+
+    If not specified, will use the `auto` ranker.
+    """
+
+    score_threshold: float
+    """The score threshold for the file search.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+
+class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent(
+    BaseModel
+):
+    text: Optional[str] = None
+    """The text content of the file."""
+
+    type: Optional[Literal["text"]] = None
+    """The type of the content."""
+
+
+class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult(
+    BaseModel
+):
+    file_id: str
+    """The ID of the file that the result was found in."""
+
+    file_name: str
+    """The name of the file that the result was found in."""
+
+    score: float
+    """The score of the result.
+
+    All values must be a floating point number between 0 and 1.
+    """
+
+    content: Optional[
+        List[
+            StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent
+        ]
+    ] = None
+    """The content of the result that was found.
+
+    The content is only included if requested via the include query parameter.
+ """ + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch(BaseModel): + ranking_options: Optional[ + StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions + ] = None + """The ranking options for the file search.""" + + results: Optional[ + List[StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult] + ] = None + """The results of the file search.""" + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject(BaseModel): + id: str + """The ID of the tool call object.""" + + file_search: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch + """For now, this is always going to be an empty object.""" + + type: Literal["file_search"] + """The type of tool call. + + This is always going to be `file_search` for this type of tool call. + """ + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction(BaseModel): + arguments: str + """The arguments passed to the function.""" + + name: str + """The name of the function.""" + + output: Optional[str] = None + """The output of the function. + + This will be `null` if the outputs have not been + [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + """ + + +class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject(BaseModel): + id: str + """The ID of the tool call object.""" + + function: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction + """The definition of the function that was called.""" + + type: Literal["function"] + """The type of tool call. + + This is always going to be `function` for this type of tool call. + """ + + +StepDetailsRunStepDetailsToolCallsObjectToolCall: TypeAlias = Union[ + StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject, + StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject, + StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject, +] + + +class StepDetailsRunStepDetailsToolCallsObject(BaseModel): + tool_calls: List[StepDetailsRunStepDetailsToolCallsObjectToolCall] + """An array of tool calls the run step was involved in. + + These can be associated with one of three types of tools: `code_interpreter`, + `file_search`, or `function`. + """ + + type: Literal["tool_calls"] + """Always `tool_calls`.""" + + +StepDetails: TypeAlias = Union[StepDetailsRunStepDetailsMessageCreationObject, StepDetailsRunStepDetailsToolCallsObject] + + +class Usage(BaseModel): + completion_tokens: int + """Number of completion tokens used over the course of the run step.""" + + prompt_tokens: int + """Number of prompt tokens used over the course of the run step.""" + + total_tokens: int + """Total number of tokens used (prompt + completion).""" + + +class RunStepObject(BaseModel): + id: str + """The identifier of the run step, which can be referenced in API endpoints.""" + + assistant_id: str + """ + The ID of the [assistant](/docs/api-reference/assistants) associated with the + run step. 
+ """ + + cancelled_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run step was cancelled.""" + + completed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run step completed.""" + + created_at: int + """The Unix timestamp (in seconds) for when the run step was created.""" + + expired_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run step expired. + + A step is considered expired if the parent run is expired. + """ + + failed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the run step failed.""" + + last_error: Optional[LastError] = None + """The last error associated with this run step. + + Will be `null` if there are no errors. + """ + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + object: Literal["thread.run.step"] + """The object type, which is always `thread.run.step`.""" + + run_id: str + """The ID of the [run](/docs/api-reference/runs) that this run step is a part of.""" + + status: Literal["in_progress", "cancelled", "failed", "completed", "expired"] + """ + The status of the run step, which can be either `in_progress`, `cancelled`, + `failed`, `completed`, or `expired`. + """ + + step_details: StepDetails + """The details of the run step.""" + + thread_id: str + """The ID of the [thread](/docs/api-reference/threads) that was run.""" + + type: Literal["message_creation", "tool_calls"] + """The type of run step, which can be either `message_creation` or `tool_calls`.""" + + usage: Optional[Usage] = None + """Usage statistics related to the run step. + + This value will be `null` while the run step's status is `in_progress`. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py new file mode 100644 index 00000000..6383fcb3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["StepListParams"] + + +class StepListParams(TypedDict, total=False): + thread_id: Required[str] + + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] + """A list of additional fields to include in the response. 
+ + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py new file mode 100644 index 00000000..93ccb4ca --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ...._models import BaseModel +from .run_step_object import RunStepObject + +__all__ = ["StepListResponse"] + + +class StepListResponse(BaseModel): + data: List[RunStepObject] + + first_id: str + + has_more: bool + + last_id: str + + object: str diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py new file mode 100644 index 00000000..ce6bcbfb --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["StepRetrieveParams"] + + +class StepRetrieveParams(TypedDict, total=False): + thread_id: Required[str] + + run_id: Required[str] + + include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object.py b/src/digitalocean_genai_sdk/types/threads/truncation_object.py new file mode 100644 index 00000000..7c81b3b5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/truncation_object.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TruncationObject"] + + +class TruncationObject(BaseModel): + type: Literal["auto", "last_messages"] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] = None + """ + The number of most recent messages from the thread when constructing the context + for the run. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py b/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py new file mode 100644 index 00000000..98d942fa --- /dev/null +++ b/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["TruncationObjectParam"] + + +class TruncationObjectParam(TypedDict, total=False): + type: Required[Literal["auto", "last_messages"]] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] + """ + The number of most recent messages from the thread when constructing the context + for the run. + """ diff --git a/src/digitalocean_genai_sdk/types/transcription_segment.py b/src/digitalocean_genai_sdk/types/transcription_segment.py new file mode 100644 index 00000000..2345fa18 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/transcription_segment.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from .._models import BaseModel + +__all__ = ["TranscriptionSegment"] + + +class TranscriptionSegment(BaseModel): + id: int + """Unique identifier of the segment.""" + + avg_logprob: float + """Average logprob of the segment. + + If the value is lower than -1, consider the logprobs failed. + """ + + compression_ratio: float + """Compression ratio of the segment. + + If the value is greater than 2.4, consider the compression failed. + """ + + end: float + """End time of the segment in seconds.""" + + no_speech_prob: float + """Probability of no speech in the segment. + + If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this + segment silent. + """ + + seek: int + """Seek offset of the segment.""" + + start: float + """Start time of the segment in seconds.""" + + temperature: float + """Temperature parameter used for generating the segment.""" + + text: str + """Text content of the segment.""" + + tokens: List[int] + """Array of token IDs for the text content.""" diff --git a/src/digitalocean_genai_sdk/types/upload.py b/src/digitalocean_genai_sdk/types/upload.py new file mode 100644 index 00000000..06b8a806 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/upload.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .openai_file import OpenAIFile + +__all__ = ["Upload"] + + +class Upload(BaseModel): + id: str + """The Upload unique identifier, which can be referenced in API endpoints.""" + + bytes: int + """The intended number of bytes to be uploaded.""" + + created_at: int + """The Unix timestamp (in seconds) for when the Upload was created.""" + + expires_at: int + """The Unix timestamp (in seconds) for when the Upload will expire.""" + + filename: str + """The name of the file to be uploaded.""" + + purpose: str + """The intended purpose of the file. 
+ + [Please refer here](/docs/api-reference/files/object#files/object-purpose) for + acceptable values. + """ + + status: Literal["pending", "completed", "cancelled", "expired"] + """The status of the Upload.""" + + file: Optional[OpenAIFile] = None + """The `File` object represents a document that has been uploaded to OpenAI.""" + + object: Optional[Literal["upload"]] = None + """The object type, which is always "upload".""" diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_params.py b/src/digitalocean_genai_sdk/types/upload_add_part_params.py new file mode 100644 index 00000000..a0c8b61c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/upload_add_part_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .._types import FileTypes + +__all__ = ["UploadAddPartParams"] + + +class UploadAddPartParams(TypedDict, total=False): + data: Required[FileTypes] + """The chunk of bytes for this Part.""" diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_response.py b/src/digitalocean_genai_sdk/types/upload_add_part_response.py new file mode 100644 index 00000000..fb091f76 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/upload_add_part_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["UploadAddPartResponse"] + + +class UploadAddPartResponse(BaseModel): + id: str + """The upload Part unique identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the Part was created.""" + + object: Literal["upload.part"] + """The object type, which is always `upload.part`.""" + + upload_id: str + """The ID of the Upload object that this Part was added to.""" diff --git a/src/digitalocean_genai_sdk/types/upload_complete_params.py b/src/digitalocean_genai_sdk/types/upload_complete_params.py new file mode 100644 index 00000000..cce568d5 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/upload_complete_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +__all__ = ["UploadCompleteParams"] + + +class UploadCompleteParams(TypedDict, total=False): + part_ids: Required[List[str]] + """The ordered list of Part IDs.""" + + md5: str + """ + The optional md5 checksum for the file contents to verify if the bytes uploaded + matches what you expect. + """ diff --git a/src/digitalocean_genai_sdk/types/upload_create_params.py b/src/digitalocean_genai_sdk/types/upload_create_params.py new file mode 100644 index 00000000..eab9a51b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/upload_create_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["UploadCreateParams"] + + +class UploadCreateParams(TypedDict, total=False): + bytes: Required[int] + """The number of bytes in the file you are uploading.""" + + filename: Required[str] + """The name of the file to upload.""" + + mime_type: Required[str] + """The MIME type of the file. 
+
+    This must fall within the supported MIME types for your file purpose. See the
+    supported MIME types for assistants and vision.
+    """
+
+    purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]]
+    """The intended purpose of the uploaded file.
+
+    See the
+    [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
+    """
diff --git a/src/digitalocean_genai_sdk/types/usage_response.py b/src/digitalocean_genai_sdk/types/usage_response.py
new file mode 100644
index 00000000..9f70e7c4
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/usage_response.py
@@ -0,0 +1,352 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from .._models import BaseModel
+
+__all__ = [
+    "UsageResponse",
+    "Data",
+    "DataResult",
+    "DataResultUsageCompletionsResult",
+    "DataResultUsageEmbeddingsResult",
+    "DataResultUsageModerationsResult",
+    "DataResultUsageImagesResult",
+    "DataResultUsageAudioSpeechesResult",
+    "DataResultUsageAudioTranscriptionsResult",
+    "DataResultUsageVectorStoresResult",
+    "DataResultUsageCodeInterpreterSessionsResult",
+    "DataResultCostsResult",
+    "DataResultCostsResultAmount",
+]
+
+
+class DataResultUsageCompletionsResult(BaseModel):
+    input_tokens: int
+    """The aggregated number of text input tokens used, including cached tokens.
+
+    For customers subscribed to scale tier, this includes scale tier tokens.
+    """
+
+    num_model_requests: int
+    """The count of requests made to the model."""
+
+    object: Literal["organization.usage.completions.result"]
+
+    output_tokens: int
+    """The aggregated number of text output tokens used.
+
+    For customers subscribed to scale tier, this includes scale tier tokens.
+    """
+
+    api_key_id: Optional[str] = None
+    """
+    When `group_by=api_key_id`, this field provides the API key ID of the grouped
+    usage result.
+    """
+
+    batch: Optional[bool] = None
+    """
+    When `group_by=batch`, this field tells whether the grouped usage result is
+    batch or not.
+    """
+
+    input_audio_tokens: Optional[int] = None
+    """The aggregated number of audio input tokens used, including cached tokens."""
+
+    input_cached_tokens: Optional[int] = None
+    """
+    The aggregated number of text input tokens that have been cached from previous
+    requests. For customers subscribed to scale tier, this includes scale tier
+    tokens.
+    """
+
+    model: Optional[str] = None
+    """
+    When `group_by=model`, this field provides the model name of the grouped usage
+    result.
+    """
+
+    output_audio_tokens: Optional[int] = None
+    """The aggregated number of audio output tokens used."""
+
+    project_id: Optional[str] = None
+    """
+    When `group_by=project_id`, this field provides the project ID of the grouped
+    usage result.
+    """
+
+    user_id: Optional[str] = None
+    """
+    When `group_by=user_id`, this field provides the user ID of the grouped usage
+    result.
+    """
+
+
+class DataResultUsageEmbeddingsResult(BaseModel):
+    input_tokens: int
+    """The aggregated number of input tokens used."""
+
+    num_model_requests: int
+    """The count of requests made to the model."""
+
+    object: Literal["organization.usage.embeddings.result"]
+
+    api_key_id: Optional[str] = None
+    """
+    When `group_by=api_key_id`, this field provides the API key ID of the grouped
+    usage result.
+    """
+
+    model: Optional[str] = None
+    """
+    When `group_by=model`, this field provides the model name of the grouped usage
+    result.
+ """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + user_id: Optional[str] = None + """ + When `group_by=user_id`, this field provides the user ID of the grouped usage + result. + """ + + +class DataResultUsageModerationsResult(BaseModel): + input_tokens: int + """The aggregated number of input tokens used.""" + + num_model_requests: int + """The count of requests made to the model.""" + + object: Literal["organization.usage.moderations.result"] + + api_key_id: Optional[str] = None + """ + When `group_by=api_key_id`, this field provides the API key ID of the grouped + usage result. + """ + + model: Optional[str] = None + """ + When `group_by=model`, this field provides the model name of the grouped usage + result. + """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + user_id: Optional[str] = None + """ + When `group_by=user_id`, this field provides the user ID of the grouped usage + result. + """ + + +class DataResultUsageImagesResult(BaseModel): + images: int + """The number of images processed.""" + + num_model_requests: int + """The count of requests made to the model.""" + + object: Literal["organization.usage.images.result"] + + api_key_id: Optional[str] = None + """ + When `group_by=api_key_id`, this field provides the API key ID of the grouped + usage result. + """ + + model: Optional[str] = None + """ + When `group_by=model`, this field provides the model name of the grouped usage + result. + """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + size: Optional[str] = None + """ + When `group_by=size`, this field provides the image size of the grouped usage + result. + """ + + source: Optional[str] = None + """ + When `group_by=source`, this field provides the source of the grouped usage + result, possible values are `image.generation`, `image.edit`, `image.variation`. + """ + + user_id: Optional[str] = None + """ + When `group_by=user_id`, this field provides the user ID of the grouped usage + result. + """ + + +class DataResultUsageAudioSpeechesResult(BaseModel): + characters: int + """The number of characters processed.""" + + num_model_requests: int + """The count of requests made to the model.""" + + object: Literal["organization.usage.audio_speeches.result"] + + api_key_id: Optional[str] = None + """ + When `group_by=api_key_id`, this field provides the API key ID of the grouped + usage result. + """ + + model: Optional[str] = None + """ + When `group_by=model`, this field provides the model name of the grouped usage + result. + """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + user_id: Optional[str] = None + """ + When `group_by=user_id`, this field provides the user ID of the grouped usage + result. + """ + + +class DataResultUsageAudioTranscriptionsResult(BaseModel): + num_model_requests: int + """The count of requests made to the model.""" + + object: Literal["organization.usage.audio_transcriptions.result"] + + seconds: int + """The number of seconds processed.""" + + api_key_id: Optional[str] = None + """ + When `group_by=api_key_id`, this field provides the API key ID of the grouped + usage result. 
+ """ + + model: Optional[str] = None + """ + When `group_by=model`, this field provides the model name of the grouped usage + result. + """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + user_id: Optional[str] = None + """ + When `group_by=user_id`, this field provides the user ID of the grouped usage + result. + """ + + +class DataResultUsageVectorStoresResult(BaseModel): + object: Literal["organization.usage.vector_stores.result"] + + usage_bytes: int + """The vector stores usage in bytes.""" + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + +class DataResultUsageCodeInterpreterSessionsResult(BaseModel): + object: Literal["organization.usage.code_interpreter_sessions.result"] + + num_sessions: Optional[int] = None + """The number of code interpreter sessions.""" + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + usage result. + """ + + +class DataResultCostsResultAmount(BaseModel): + currency: Optional[str] = None + """Lowercase ISO-4217 currency e.g. "usd" """ + + value: Optional[float] = None + """The numeric value of the cost.""" + + +class DataResultCostsResult(BaseModel): + object: Literal["organization.costs.result"] + + amount: Optional[DataResultCostsResultAmount] = None + """The monetary value in its associated currency.""" + + line_item: Optional[str] = None + """ + When `group_by=line_item`, this field provides the line item of the grouped + costs result. + """ + + project_id: Optional[str] = None + """ + When `group_by=project_id`, this field provides the project ID of the grouped + costs result. + """ + + +DataResult: TypeAlias = Union[ + DataResultUsageCompletionsResult, + DataResultUsageEmbeddingsResult, + DataResultUsageModerationsResult, + DataResultUsageImagesResult, + DataResultUsageAudioSpeechesResult, + DataResultUsageAudioTranscriptionsResult, + DataResultUsageVectorStoresResult, + DataResultUsageCodeInterpreterSessionsResult, + DataResultCostsResult, +] + + +class Data(BaseModel): + end_time: int + + object: Literal["bucket"] + + result: List[DataResult] + + start_time: int + + +class UsageResponse(BaseModel): + data: List[Data] + + has_more: bool + + next_page: str + + object: Literal["page"] diff --git a/src/digitalocean_genai_sdk/types/vector_store_create_params.py b/src/digitalocean_genai_sdk/types/vector_store_create_params.py new file mode 100644 index 00000000..48118e80 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_create_params.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Optional +from typing_extensions import TypeAlias, TypedDict + +from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam +from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam +from .static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam + +__all__ = ["VectorStoreCreateParams", "ChunkingStrategy"] + + +class VectorStoreCreateParams(TypedDict, total=False): + chunking_strategy: ChunkingStrategy + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. 
+ """ + + expires_after: VectorStoreExpirationAfterParam + """The expiration policy for a vector store.""" + + file_ids: List[str] + """A list of [File](/docs/api-reference/files) IDs that the vector store should + use. + + Useful for tools like `file_search` that can access files. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + name: str + """The name of the vector store.""" + + +ChunkingStrategy: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam] diff --git a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py b/src/digitalocean_genai_sdk/types/vector_store_delete_response.py new file mode 100644 index 00000000..17d3ee21 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["VectorStoreDeleteResponse"] + + +class VectorStoreDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["vector_store.deleted"] diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py new file mode 100644 index 00000000..1d417d52 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["VectorStoreExpirationAfter"] + + +class VectorStoreExpirationAfter(BaseModel): + anchor: Literal["last_active_at"] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: int + """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py new file mode 100644 index 00000000..29a008c7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["VectorStoreExpirationAfterParam"] + + +class VectorStoreExpirationAfterParam(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_params.py b/src/digitalocean_genai_sdk/types/vector_store_list_params.py new file mode 100644 index 00000000..e26ff90a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_list_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VectorStoreListParams"] + + +class VectorStoreListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_response.py b/src/digitalocean_genai_sdk/types/vector_store_list_response.py new file mode 100644 index 00000000..2dc455ea --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_list_response.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from .._models import BaseModel +from .vector_store_object import VectorStoreObject + +__all__ = ["VectorStoreListResponse"] + + +class VectorStoreListResponse(BaseModel): + data: List[VectorStoreObject] + + first_id: str + + has_more: bool + + last_id: str + + object: str diff --git a/src/digitalocean_genai_sdk/types/vector_store_object.py b/src/digitalocean_genai_sdk/types/vector_store_object.py new file mode 100644 index 00000000..ebd52a31 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_store_object.py @@ -0,0 +1,71 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .vector_store_expiration_after import VectorStoreExpirationAfter + +__all__ = ["VectorStoreObject", "FileCounts"] + + +class FileCounts(BaseModel): + cancelled: int + """The number of files that were cancelled.""" + + completed: int + """The number of files that have been successfully processed.""" + + failed: int + """The number of files that have failed to process.""" + + in_progress: int + """The number of files that are currently being processed.""" + + total: int + """The total number of files.""" + + +class VectorStoreObject(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the vector store was created.""" + + file_counts: FileCounts + + last_active_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the vector store was last active.""" + + metadata: Optional[Dict[str, str]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+    """
+
+    name: str
+    """The name of the vector store."""
+
+    object: Literal["vector_store"]
+    """The object type, which is always `vector_store`."""
+
+    status: Literal["expired", "in_progress", "completed"]
+    """
+    The status of the vector store, which can be either `expired`, `in_progress`, or
+    `completed`. A status of `completed` indicates that the vector store is ready
+    for use.
+    """
+
+    usage_bytes: int
+    """The total number of bytes used by the files in the vector store."""
+
+    expires_after: Optional[VectorStoreExpirationAfter] = None
+    """The expiration policy for a vector store."""
+
+    expires_at: Optional[int] = None
+    """The Unix timestamp (in seconds) for when the vector store will expire."""
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_params.py b/src/digitalocean_genai_sdk/types/vector_store_search_params.py
new file mode 100644
index 00000000..5b90b063
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_store_search_params.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .compound_filter_param import CompoundFilterParam
+from .comparison_filter_param import ComparisonFilterParam
+
+__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"]
+
+
+class VectorStoreSearchParams(TypedDict, total=False):
+    query: Required[Union[str, List[str]]]
+    """A query string for a search."""
+
+    filters: Filters
+    """A filter to apply based on file attributes."""
+
+    max_num_results: int
+    """The maximum number of results to return.
+
+    This number should be between 1 and 50 inclusive.
+    """
+
+    ranking_options: RankingOptions
+    """Ranking options for search."""
+
+    rewrite_query: bool
+    """Whether to rewrite the natural language query for vector search."""
+
+
+Filters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam]
+
+
+class RankingOptions(TypedDict, total=False):
+    ranker: Literal["auto", "default-2024-11-15"]
+
+    score_threshold: float
diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_response.py b/src/digitalocean_genai_sdk/types/vector_store_search_response.py
new file mode 100644
index 00000000..b303f7ea
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_store_search_response.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreSearchResponse", "Data", "DataContent"]
+
+
+class DataContent(BaseModel):
+    text: str
+    """The text content returned from search."""
+
+    type: Literal["text"]
+    """The type of content."""
+
+
+class Data(BaseModel):
+    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
+
+    content: List[DataContent]
+    """Content chunks from the file."""
+
+    file_id: str
+    """The ID of the vector store file."""
+
+    filename: str
+    """The name of the vector store file."""
+
+    score: float
+    """The similarity score for the result."""
+
+
+class VectorStoreSearchResponse(BaseModel):
+    data: List[Data]
+    """The list of search result items."""
+
+    has_more: bool
+    """Indicates if there are more results to fetch."""
+
+    next_page: Optional[str] = None
+    """The token for the next page, if any."""
+
+    object: Literal["vector_store.search_results.page"]
+    """The object type, which is always `vector_store.search_results.page`."""
+
+    search_query: List[str]
diff --git a/src/digitalocean_genai_sdk/types/vector_store_update_params.py b/src/digitalocean_genai_sdk/types/vector_store_update_params.py
new file mode 100644
index 00000000..a9400cf2
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_store_update_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import TypedDict
+
+from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
+
+__all__ = ["VectorStoreUpdateParams"]
+
+
+class VectorStoreUpdateParams(TypedDict, total=False):
+    expires_after: Optional[VectorStoreExpirationAfterParam]
+    """The expiration policy for a vector store."""
+
+    metadata: Optional[Dict[str, str]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    name: Optional[str]
+    """The name of the vector store."""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
new file mode 100644
index 00000000..5018f06d
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from .file_list_params import FileListParams as FileListParams +from .file_create_params import FileCreateParams as FileCreateParams +from .file_update_params import FileUpdateParams as FileUpdateParams +from .file_delete_response import FileDeleteResponse as FileDeleteResponse +from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams +from .vector_store_file_object import VectorStoreFileObject as VectorStoreFileObject +from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams +from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse +from .vector_store_file_batch_object import VectorStoreFileBatchObject as VectorStoreFileBatchObject +from .chunking_strategy_request_param import ChunkingStrategyRequestParam as ChunkingStrategyRequestParam +from .list_vector_store_files_response import ListVectorStoreFilesResponse as ListVectorStoreFilesResponse diff --git a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py new file mode 100644 index 00000000..1dab9558 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from ..auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam +from ..static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam + +__all__ = ["ChunkingStrategyRequestParam"] + +ChunkingStrategyRequestParam: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam] diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py new file mode 100644 index 00000000..2e2bf227 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Optional +from typing_extensions import Required, TypedDict + +from .chunking_strategy_request_param import ChunkingStrategyRequestParam + +__all__ = ["FileBatchCreateParams"] + + +class FileBatchCreateParams(TypedDict, total=False): + file_ids: Required[List[str]] + """A list of [File](/docs/api-reference/files) IDs that the vector store should + use. + + Useful for tools like `file_search` that can access files. + """ + + attributes: Optional[Dict[str, Union[str, float, bool]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + + chunking_strategy: ChunkingStrategyRequestParam + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py new file mode 100644 index 00000000..2a0a6c6a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FileBatchListFilesParams"] + + +class FileBatchListFilesParams(TypedDict, total=False): + vector_store_id: Required[str] + + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + filter: Literal["in_progress", "completed", "failed", "cancelled"] + """Filter by file status. + + One of `in_progress`, `completed`, `failed`, `cancelled`. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py new file mode 100644 index 00000000..6183f4e7 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Required, TypedDict + +from .chunking_strategy_request_param import ChunkingStrategyRequestParam + +__all__ = ["FileCreateParams"] + + +class FileCreateParams(TypedDict, total=False): + file_id: Required[str] + """A [File](/docs/api-reference/files) ID that the vector store should use. + + Useful for tools like `file_search` that can access files. + """ + + attributes: Optional[Dict[str, Union[str, float, bool]]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + + chunking_strategy: ChunkingStrategyRequestParam + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py new file mode 100644 index 00000000..24fbe570 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FileDeleteResponse"] + + +class FileDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["vector_store.file.deleted"] diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py new file mode 100644 index 00000000..867b5fb3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["FileListParams"] + + +class FileListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + filter: Literal["in_progress", "completed", "failed", "cancelled"] + """Filter by file status. + + One of `in_progress`, `completed`, `failed`, `cancelled`. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py new file mode 100644 index 00000000..e4f0966c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileRetrieveContentResponse", "Data"]
+
+
+class Data(BaseModel):
+    text: Optional[str] = None
+    """The text content."""
+
+    type: Optional[str] = None
+    """The content type (currently only `"text"`)."""
+
+
+class FileRetrieveContentResponse(BaseModel):
+    data: List[Data]
+    """Parsed content of the file."""
+
+    has_more: bool
+    """Indicates if there are more content pages to fetch."""
+
+    next_page: Optional[str] = None
+    """The token for the next page, if any."""
+
+    object: Literal["vector_store.file_content.page"]
+    """The object type, which is always `vector_store.file_content.page`."""
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
new file mode 100644
index 00000000..ebf540d0
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["FileUpdateParams"]
+
+
+class FileUpdateParams(TypedDict, total=False):
+    vector_store_id: Required[str]
+
+    attributes: Required[Optional[Dict[str, Union[str, float, bool]]]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters, booleans, or numbers.
+    """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py b/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
new file mode 100644
index 00000000..dc997962
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+from .vector_store_file_object import VectorStoreFileObject
+
+__all__ = ["ListVectorStoreFilesResponse"]
+
+
+class ListVectorStoreFilesResponse(BaseModel):
+    data: List[VectorStoreFileObject]
+
+    first_id: str
+
+    has_more: bool
+
+    last_id: str
+
+    object: str
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
new file mode 100644
index 00000000..3d5aa1bd
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["VectorStoreFileBatchObject", "FileCounts"]
+
+
+class FileCounts(BaseModel):
+    cancelled: int
+    """The number of files that were cancelled."""
+
+    completed: int
+    """The number of files that have been processed."""
+
+    failed: int
+    """The number of files that have failed to process."""
+
+    in_progress: int
+    """The number of files that are currently being processed."""
+
+    total: int
+    """The total number of files."""
+
+
+class VectorStoreFileBatchObject(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """
+    The Unix timestamp (in seconds) for when the vector store files batch was
+    created.
+    """
+
+    file_counts: FileCounts
+
+    object: Literal["vector_store.files_batch"]
+    """The object type, which is always `vector_store.files_batch`."""
+
+    status: Literal["in_progress", "completed", "cancelled", "failed"]
+    """
+    The status of the vector store files batch, which can be either `in_progress`,
+    `completed`, `cancelled`, or `failed`.
+    """
+
+    vector_store_id: str
+    """
+    The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
+    [File](/docs/api-reference/files) is attached to.
+    """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
new file mode 100644
index 00000000..e28e28a6
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
@@ -0,0 +1,88 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..static_chunking_strategy import StaticChunkingStrategy
+
+__all__ = [
+    "VectorStoreFileObject",
+    "LastError",
+    "ChunkingStrategy",
+    "ChunkingStrategyStaticChunkingStrategyResponseParam",
+    "ChunkingStrategyOtherChunkingStrategyResponseParam",
+]
+
+
+class LastError(BaseModel):
+    code: Literal["server_error", "unsupported_file", "invalid_file"]
+    """One of `server_error`, `unsupported_file`, or `invalid_file`."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+class ChunkingStrategyStaticChunkingStrategyResponseParam(BaseModel):
+    static: StaticChunkingStrategy
+
+    type: Literal["static"]
+    """Always `static`."""
+
+
+class ChunkingStrategyOtherChunkingStrategyResponseParam(BaseModel):
+    type: Literal["other"]
+    """Always `other`."""
+
+
+ChunkingStrategy: TypeAlias = Union[
+    ChunkingStrategyStaticChunkingStrategyResponseParam, ChunkingStrategyOtherChunkingStrategyResponseParam
+]
+
+
+class VectorStoreFileObject(BaseModel):
+    id: str
+    """The identifier, which can be referenced in API endpoints."""
+
+    created_at: int
+    """The Unix timestamp (in seconds) for when the vector store file was created."""
+
+    last_error: Optional[LastError] = None
+    """The last error associated with this vector store file.
+
+    Will be `null` if there are no errors.
+    """
+
+    object: Literal["vector_store.file"]
+    """The object type, which is always `vector_store.file`."""
+
+    status: Literal["in_progress", "completed", "cancelled", "failed"]
+    """
+    The status of the vector store file, which can be either `in_progress`,
+    `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
+    vector store file is ready for use.
+ """ + + usage_bytes: int + """The total vector store usage in bytes. + + Note that this may be different from the original file size. + """ + + vector_store_id: str + """ + The ID of the [vector store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. + """ + + attributes: Optional[Dict[str, Union[str, float, bool]]] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters, booleans, or numbers. + """ + + chunking_strategy: Optional[ChunkingStrategy] = None + """The strategy used to chunk the file.""" diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared.py b/src/digitalocean_genai_sdk/types/voice_ids_shared.py new file mode 100644 index 00000000..5679bda3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/voice_ids_shared.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, TypeAlias + +__all__ = ["VoiceIDsShared"] + +VoiceIDsShared: TypeAlias = Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] +] diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py b/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py new file mode 100644 index 00000000..ccbd853d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +__all__ = ["VoiceIDsSharedParam"] + +VoiceIDsSharedParam: TypeAlias = Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] +] diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call.py b/src/digitalocean_genai_sdk/types/web_search_tool_call.py new file mode 100644 index 00000000..1b57ab87 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/web_search_tool_call.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["WebSearchToolCall"] + + +class WebSearchToolCall(BaseModel): + id: str + """The unique ID of the web search tool call.""" + + status: Literal["in_progress", "searching", "completed", "failed"] + """The status of the web search tool call.""" + + type: Literal["web_search_call"] + """The type of the web search tool call. Always `web_search_call`.""" diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py new file mode 100644 index 00000000..39e5c502 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["WebSearchToolCallParam"] + + +class WebSearchToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the web search tool call.""" + + status: Required[Literal["in_progress", "searching", "completed", "failed"]] + """The status of the web search tool call.""" + + type: Required[Literal["web_search_call"]] + """The type of the web search tool call. Always `web_search_call`.""" diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/__init__.py b/tests/api_resources/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/chat/__init__.py b/tests/api_resources/chat/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/chat/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py new file mode 100644 index 00000000..b065b83d --- /dev/null +++ b/tests/api_resources/chat/test_completions.py @@ -0,0 +1,731 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.chat import ( + CreateResponse, + CompletionListResponse, + CompletionDeleteResponse, + CompletionListMessagesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCompletions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], + model="gpt-4o", + audio={ + "format": "wav", + "voice": "ash", + }, + frequency_penalty=-2, + function_call="none", + functions=[ + { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + } + ], + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=0, + max_tokens=0, + metadata={"foo": "string"}, + modalities=["text"], + n=1, + parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, + presence_penalty=-2, + reasoning_effort="low", + response_format={"type": "text"}, + seed=0, + service_tier="auto", + stop="\n", + store=True, + stream=True, + stream_options={"include_usage": True}, + temperature=1, + tool_choice="none", + tools=[ + { + "function": 
{ + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + "strict": True, + }, + "type": "function", + } + ], + top_logprobs=0, + top_p=1, + user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": "country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.retrieve( + "completion_id", + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.retrieve( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.retrieve( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.update( + completion_id="", + metadata={"foo": "string"}, + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.list() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.list( + after="after", + limit=0, + metadata={"foo": "string"}, + model="model", + order="asc", + ) + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.delete( + "completion_id", + ) + assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.delete( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.delete( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionDeleteResponse, completion, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_messages(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.list_messages( + completion_id="completion_id", + ) + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_messages_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.chat.completions.list_messages( + completion_id="completion_id", + after="after", + limit=0, + order="asc", + ) + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.completions.with_raw_response.list_messages( + completion_id="completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.completions.with_streaming_response.list_messages( + completion_id="completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_messages(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + client.chat.completions.with_raw_response.list_messages( + completion_id="", + ) + + +class TestAsyncCompletions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], + model="gpt-4o", + audio={ + "format": "wav", + "voice": "ash", + }, + frequency_penalty=-2, + function_call="none", + functions=[ + { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + } + ], + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=0, + max_tokens=0, + metadata={"foo": "string"}, + modalities=["text"], + n=1, + parallel_tool_calls=True, + prediction={ + "content": "string", + "type": "content", + }, + 
presence_penalty=-2, + reasoning_effort="low", + response_format={"type": "text"}, + seed=0, + service_tier="auto", + stop="\n", + store=True, + stream=True, + stream_options={"include_usage": True}, + temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + "strict": True, + }, + "type": "function", + } + ], + top_logprobs=0, + top_p=1, + user="user-1234", + web_search_options={ + "search_context_size": "low", + "user_location": { + "approximate": { + "city": "city", + "country": "country", + "region": "region", + "timezone": "timezone", + }, + "type": "approximate", + }, + }, + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "developer", + } + ], + model="gpt-4o", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.retrieve( + "completion_id", + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.retrieve( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.retrieve( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + 
completion = await async_client.chat.completions.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.update( + completion_id="completion_id", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.update( + completion_id="", + metadata={"foo": "string"}, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.list() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.list( + after="after", + limit=0, + metadata={"foo": "string"}, + model="model", + order="asc", + ) + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionListResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.delete( + "completion_id", + ) + assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.delete( + "completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.delete( + "completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.list_messages( + completion_id="completion_id", + ) + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_messages_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.chat.completions.list_messages( + completion_id="completion_id", + after="after", + limit=0, + order="asc", + ) + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.completions.with_raw_response.list_messages( + completion_id="completion_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.completions.with_streaming_response.list_messages( + completion_id="completion_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): + await async_client.chat.completions.with_raw_response.list_messages( + completion_id="", + ) diff --git a/tests/api_resources/fine_tuning/__init__.py b/tests/api_resources/fine_tuning/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/fine_tuning/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI 
spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/checkpoints/__init__.py b/tests/api_resources/fine_tuning/checkpoints/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/fine_tuning/checkpoints/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py new file mode 100644 index 00000000..1983d90a --- /dev/null +++ b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py @@ -0,0 +1,309 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.fine_tuning.checkpoints import ( + PermissionDeleteResponse, + ListFineTuningCheckpointPermission, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestPermissions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + permission = client.fine_tuning.checkpoints.permissions.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.checkpoints.permissions.with_raw_response.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.checkpoints.permissions.with_streaming_response.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + client.fine_tuning.checkpoints.permissions.with_raw_response.create( + permission_id="", + project_ids=["string"], + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + permission = client.fine_tuning.checkpoints.permissions.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + permission = client.fine_tuning.checkpoints.permissions.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + order="ascending", + project_id="project_id", + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( + permission_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + permission = client.fine_tuning.checkpoints.permissions.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = response.parse() + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = response.parse() + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + client.fine_tuning.checkpoints.permissions.with_raw_response.delete( + "", + ) + + +class TestAsyncPermissions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + permission = await async_client.fine_tuning.checkpoints.permissions.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = await response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.create( + permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", + project_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = await response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create( + permission_id="", + project_ids=["string"], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + permission = await async_client.fine_tuning.checkpoints.permissions.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + permission = await async_client.fine_tuning.checkpoints.permissions.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + order="ascending", + project_id="project_id", + ) + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = await response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve( + permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = await response.parse() + assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( + permission_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + permission = await async_client.fine_tuning.checkpoints.permissions.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + permission = await response.parse() + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete( + "cp_zc4Q7MP6XxulcVzj4MZdwsAB", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + permission = await response.parse() + assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): + await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/fine_tuning/jobs/__init__.py b/tests/api_resources/fine_tuning/jobs/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/fine_tuning/jobs/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py new file mode 100644 index 00000000..f94416f9 --- /dev/null +++ b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py @@ -0,0 +1,126 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCheckpoints: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + checkpoint = client.fine_tuning.jobs.checkpoints.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + checkpoint = client.fine_tuning.jobs.checkpoints.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + ) + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + checkpoint = response.parse() + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + checkpoint = response.parse() + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( + fine_tuning_job_id="", + ) + + +class TestAsyncCheckpoints: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + ) + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + checkpoint = await response.parse() + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + checkpoint = await response.parse() + assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( + fine_tuning_job_id="", + ) diff --git a/tests/api_resources/fine_tuning/jobs/test_events.py b/tests/api_resources/fine_tuning/jobs/test_events.py new file mode 100644 index 00000000..39802767 --- /dev/null +++ b/tests/api_resources/fine_tuning/jobs/test_events.py @@ -0,0 +1,126 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + event = client.fine_tuning.jobs.events.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + event = client.fine_tuning.jobs.events.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + ) + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.events.with_raw_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + event = response.parse() + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.events.with_streaming_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + event = response.parse() + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + client.fine_tuning.jobs.events.with_raw_response.retrieve( + fine_tuning_job_id="", + ) + + +class TestAsyncEvents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + event = await async_client.fine_tuning.jobs.events.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + event = await async_client.fine_tuning.jobs.events.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + after="after", + limit=0, + ) + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.jobs.events.with_raw_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + event = await response.parse() + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.events.with_streaming_response.retrieve( + fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + event = await response.parse() + assert_matches_type(EventRetrieveResponse, event, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + await async_client.fine_tuning.jobs.events.with_raw_response.retrieve( + fine_tuning_job_id="", + ) diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py new file mode 100644 index 00000000..f0014f09 --- /dev/null +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -0,0 +1,437 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
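+
+# The *_with_all_params variants below exercise the full create payload: "auto"
+# hyperparameters, a wandb integration entry, free-form metadata, and a nested
+# `method` object carrying both `dpo` and `supervised` hyperparameter blocks.
+# The minimal create, by contrast, needs only the model and training file
+# (illustrative values):
+#
+#     job = client.fine_tuning.jobs.create(
+#         model="gpt-4o-mini",
+#         training_file="file-abc123",
+#     )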
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.fine_tuning import ( + FineTuningJob, + JobListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestJobs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.create( + model="gpt-4o-mini", + training_file="file-abc123", + hyperparameters={ + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + }, + integrations=[ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": "entity", + "name": "name", + "tags": ["custom-tag"], + }, + } + ], + metadata={"foo": "string"}, + method={ + "dpo": { + "hyperparameters": { + "batch_size": "auto", + "beta": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "supervised": { + "hyperparameters": { + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "type": "supervised", + }, + seed=42, + suffix="x", + validation_file="file-abc123", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.with_raw_response.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.with_streaming_response.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.retrieve( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.with_raw_response.retrieve( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.with_streaming_response.retrieve( + 
"ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + client.fine_tuning.jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.list() + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.list( + after="after", + limit=0, + metadata={"foo": "string"}, + ) + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = response.parse() + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = response.parse() + assert_matches_type(JobListResponse, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: + job = client.fine_tuning.jobs.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.fine_tuning.jobs.with_raw_response.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.fine_tuning.jobs.with_streaming_response.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + client.fine_tuning.jobs.with_raw_response.cancel( + "", + ) + + +class TestAsyncJobs: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.create( + model="gpt-4o-mini", + training_file="file-abc123", + hyperparameters={ + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + }, + integrations=[ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": "entity", + "name": "name", + "tags": ["custom-tag"], + }, + } + ], + metadata={"foo": "string"}, + method={ + "dpo": { + "hyperparameters": { + "batch_size": "auto", + "beta": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "supervised": { + "hyperparameters": { + "batch_size": "auto", + "learning_rate_multiplier": "auto", + "n_epochs": "auto", + } + }, + "type": "supervised", + }, + seed=42, + suffix="x", + validation_file="file-abc123", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.jobs.with_raw_response.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.with_streaming_response.create( + model="gpt-4o-mini", + training_file="file-abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.retrieve( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.jobs.with_raw_response.retrieve( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.with_streaming_response.retrieve( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + await async_client.fine_tuning.jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.list() + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.list( + after="after", + limit=0, + metadata={"foo": "string"}, + ) + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = await response.parse() + assert_matches_type(JobListResponse, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = await response.parse() + assert_matches_type(JobListResponse, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + job = await async_client.fine_tuning.jobs.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.fine_tuning.jobs.with_raw_response.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.fine_tuning.jobs.with_streaming_response.cancel( + "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + job = await response.parse() + assert_matches_type(FineTuningJob, job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): + await async_client.fine_tuning.jobs.with_raw_response.cancel( + "", + ) diff --git a/tests/api_resources/organization/__init__.py b/tests/api_resources/organization/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/organization/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/organization/projects/__init__.py b/tests/api_resources/organization/projects/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/organization/projects/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/organization/projects/test_api_keys.py b/tests/api_resources/organization/projects/test_api_keys.py new file mode 100644 index 00000000..d8c6bbc0 --- /dev/null +++ b/tests/api_resources/organization/projects/test_api_keys.py @@ -0,0 +1,338 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization.projects import ( + APIKey, + APIKeyListResponse, + APIKeyDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.organization.projects.api_keys.retrieve( + key_id="key_id", + project_id="project_id", + ) + assert_matches_type(APIKey, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="key_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKey, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.api_keys.with_streaming_response.retrieve( + key_id="key_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKey, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="key_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.organization.projects.api_keys.list( + project_id="project_id", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.organization.projects.api_keys.list( + project_id="project_id", + after="after", + limit=0, + ) + 
assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.api_keys.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.api_keys.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.api_keys.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.organization.projects.api_keys.delete( + key_id="key_id", + project_id="project_id", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.api_keys.with_raw_response.delete( + key_id="key_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.api_keys.with_streaming_response.delete( + key_id="key_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.api_keys.with_raw_response.delete( + key_id="key_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + client.organization.projects.api_keys.with_raw_response.delete( + key_id="", + project_id="project_id", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.organization.projects.api_keys.retrieve( + key_id="key_id", + project_id="project_id", + ) + assert_matches_type(APIKey, api_key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="key_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKey, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.api_keys.with_streaming_response.retrieve( + key_id="key_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKey, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="key_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + await async_client.organization.projects.api_keys.with_raw_response.retrieve( + key_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.organization.projects.api_keys.list( + project_id="project_id", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.organization.projects.api_keys.list( + project_id="project_id", + after="after", + limit=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.api_keys.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.api_keys.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.api_keys.with_raw_response.list( + 
project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.organization.projects.api_keys.delete( + key_id="key_id", + project_id="project_id", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.api_keys.with_raw_response.delete( + key_id="key_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.api_keys.with_streaming_response.delete( + key_id="key_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.api_keys.with_raw_response.delete( + key_id="key_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + await async_client.organization.projects.api_keys.with_raw_response.delete( + key_id="", + project_id="project_id", + ) diff --git a/tests/api_resources/organization/projects/test_rate_limits.py b/tests/api_resources/organization/projects/test_rate_limits.py new file mode 100644 index 00000000..3f7688b4 --- /dev/null +++ b/tests/api_resources/organization/projects/test_rate_limits.py @@ -0,0 +1,265 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization.projects import ( + RateLimit, + RateLimitListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRateLimits: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + rate_limit = client.organization.projects.rate_limits.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + rate_limit = client.organization.projects.rate_limits.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + batch_1_day_max_input_tokens=0, + max_audio_megabytes_per_1_minute=0, + max_images_per_1_minute=0, + max_requests_per_1_day=0, + max_requests_per_1_minute=0, + max_tokens_per_1_minute=0, + ) + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rate_limit = response.parse() + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.rate_limits.with_streaming_response.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rate_limit = response.parse() + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="rate_limit_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"): + client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + rate_limit = client.organization.projects.rate_limits.list( + project_id="project_id", + ) + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + rate_limit = client.organization.projects.rate_limits.list( + project_id="project_id", + after="after", + before="before", + limit=0, + ) + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.rate_limits.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rate_limit = response.parse() + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.rate_limits.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rate_limit = response.parse() + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.rate_limits.with_raw_response.list( + project_id="", + ) + + +class TestAsyncRateLimits: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + rate_limit = await async_client.organization.projects.rate_limits.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + rate_limit = await async_client.organization.projects.rate_limits.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + batch_1_day_max_input_tokens=0, + max_audio_megabytes_per_1_minute=0, + max_images_per_1_minute=0, + max_requests_per_1_day=0, + max_requests_per_1_minute=0, + max_tokens_per_1_minute=0, + ) + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rate_limit = await response.parse() + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.rate_limits.with_streaming_response.update( + rate_limit_id="rate_limit_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rate_limit = await response.parse() + assert_matches_type(RateLimit, rate_limit, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but 
received ''"): + await async_client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="rate_limit_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"): + await async_client.organization.projects.rate_limits.with_raw_response.update( + rate_limit_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + rate_limit = await async_client.organization.projects.rate_limits.list( + project_id="project_id", + ) + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + rate_limit = await async_client.organization.projects.rate_limits.list( + project_id="project_id", + after="after", + before="before", + limit=0, + ) + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.rate_limits.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rate_limit = await response.parse() + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.rate_limits.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rate_limit = await response.parse() + assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.rate_limits.with_raw_response.list( + project_id="", + ) diff --git a/tests/api_resources/organization/projects/test_service_accounts.py b/tests/api_resources/organization/projects/test_service_accounts.py new file mode 100644 index 00000000..4cbdbd38 --- /dev/null +++ b/tests/api_resources/organization/projects/test_service_accounts.py @@ -0,0 +1,431 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization.projects import ( + ServiceAccount, + ServiceAccountListResponse, + ServiceAccountCreateResponse, + ServiceAccountDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestServiceAccounts: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + service_account = client.organization.projects.service_accounts.create( + project_id="project_id", + name="name", + ) + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.service_accounts.with_raw_response.create( + project_id="project_id", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = response.parse() + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.service_accounts.with_streaming_response.create( + project_id="project_id", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = response.parse() + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.create( + project_id="", + name="name", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + service_account = client.organization.projects.service_accounts.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = response.parse() + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.service_accounts.with_streaming_response.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + 
service_account = response.parse() + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="service_account_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + service_account = client.organization.projects.service_accounts.list( + project_id="project_id", + ) + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + service_account = client.organization.projects.service_accounts.list( + project_id="project_id", + after="after", + limit=0, + ) + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.service_accounts.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = response.parse() + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.service_accounts.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = response.parse() + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + service_account = client.organization.projects.service_accounts.delete( + service_account_id="service_account_id", + project_id="project_id", + ) + assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="service_account_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = response.parse() + assert_matches_type(ServiceAccountDeleteResponse, service_account, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.service_accounts.with_streaming_response.delete( + service_account_id="service_account_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = response.parse() + assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="service_account_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): + client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="", + project_id="project_id", + ) + + +class TestAsyncServiceAccounts: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + service_account = await async_client.organization.projects.service_accounts.create( + project_id="project_id", + name="name", + ) + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.service_accounts.with_raw_response.create( + project_id="project_id", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = await response.parse() + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.service_accounts.with_streaming_response.create( + project_id="project_id", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = await response.parse() + assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.create( + project_id="", + name="name", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + service_account = await async_client.organization.projects.service_accounts.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + 
async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = await response.parse() + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.service_accounts.with_streaming_response.retrieve( + service_account_id="service_account_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = await response.parse() + assert_matches_type(ServiceAccount, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="service_account_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.retrieve( + service_account_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + service_account = await async_client.organization.projects.service_accounts.list( + project_id="project_id", + ) + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + service_account = await async_client.organization.projects.service_accounts.list( + project_id="project_id", + after="after", + limit=0, + ) + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.service_accounts.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = await response.parse() + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.service_accounts.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = await response.parse() + assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + service_account = await async_client.organization.projects.service_accounts.delete( + service_account_id="service_account_id", + project_id="project_id", + ) + assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="service_account_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + service_account = await response.parse() + assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.service_accounts.with_streaming_response.delete( + service_account_id="service_account_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + service_account = await response.parse() + assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="service_account_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): + await async_client.organization.projects.service_accounts.with_raw_response.delete( + service_account_id="", + project_id="project_id", + ) diff --git a/tests/api_resources/organization/projects/test_users.py b/tests/api_resources/organization/projects/test_users.py new file mode 100644 index 00000000..df2a136e --- /dev/null +++ b/tests/api_resources/organization/projects/test_users.py @@ -0,0 +1,552 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization.projects import ( + ProjectUser, + UserListResponse, + UserDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestUsers: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.retrieve( + user_id="user_id", + project_id="project_id", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.users.with_raw_response.retrieve( + user_id="user_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.users.with_streaming_response.retrieve( + user_id="user_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.users.with_raw_response.retrieve( + user_id="user_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + client.organization.projects.users.with_raw_response.retrieve( + user_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.users.with_raw_response.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.users.with_streaming_response.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + assert 
cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.users.with_raw_response.update( + user_id="user_id", + project_id="", + role="owner", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + client.organization.projects.users.with_raw_response.update( + user_id="", + project_id="project_id", + role="owner", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.list( + project_id="project_id", + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.list( + project_id="project_id", + after="after", + limit=0, + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.users.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.users.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.users.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.delete( + user_id="user_id", + project_id="project_id", + ) + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.users.with_raw_response.delete( + user_id="user_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.users.with_streaming_response.delete( + user_id="user_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + 
assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.users.with_raw_response.delete( + user_id="user_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + client.organization.projects.users.with_raw_response.delete( + user_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.projects.users.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.users.with_raw_response.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.users.with_streaming_response.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.users.with_raw_response.add( + project_id="", + role="owner", + user_id="user_id", + ) + + +class TestAsyncUsers: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.retrieve( + user_id="user_id", + project_id="project_id", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.users.with_raw_response.retrieve( + user_id="user_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.users.with_streaming_response.retrieve( + user_id="user_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(ProjectUser, 
user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.retrieve( + user_id="user_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.retrieve( + user_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.users.with_raw_response.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.users.with_streaming_response.update( + user_id="user_id", + project_id="project_id", + role="owner", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.update( + user_id="user_id", + project_id="", + role="owner", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.update( + user_id="", + project_id="project_id", + role="owner", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.list( + project_id="project_id", + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.list( + project_id="project_id", + after="after", + limit=0, + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.users.with_raw_response.list( + project_id="project_id", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.users.with_streaming_response.list( + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.delete( + user_id="user_id", + project_id="project_id", + ) + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.users.with_raw_response.delete( + user_id="user_id", + project_id="project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.users.with_streaming_response.delete( + user_id="user_id", + project_id="project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.delete( + user_id="user_id", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.delete( + user_id="", + project_id="project_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.projects.users.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.users.with_raw_response.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) + + assert response.is_closed is True + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.users.with_streaming_response.add( + project_id="project_id", + role="owner", + user_id="user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(ProjectUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.users.with_raw_response.add( + project_id="", + role="owner", + user_id="user_id", + ) diff --git a/tests/api_resources/organization/test_admin_api_keys.py b/tests/api_resources/organization/test_admin_api_keys.py new file mode 100644 index 00000000..0e0949a1 --- /dev/null +++ b/tests/api_resources/organization/test_admin_api_keys.py @@ -0,0 +1,338 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization import ( + AdminAPIKey, + AdminAPIKeyListResponse, + AdminAPIKeyDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAdminAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + admin_api_key = client.organization.admin_api_keys.create( + name="New Admin Key", + ) + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.admin_api_keys.with_raw_response.create( + name="New Admin Key", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.admin_api_keys.with_streaming_response.create( + name="New Admin Key", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + admin_api_key = client.organization.admin_api_keys.retrieve( + "key_id", + ) + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: 
DigitaloceanGenaiSDK) -> None: + response = client.organization.admin_api_keys.with_raw_response.retrieve( + "key_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.admin_api_keys.with_streaming_response.retrieve( + "key_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + client.organization.admin_api_keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + admin_api_key = client.organization.admin_api_keys.list() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + admin_api_key = client.organization.admin_api_keys.list( + after="after", + limit=0, + order="asc", + ) + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.admin_api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = response.parse() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.admin_api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = response.parse() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + admin_api_key = client.organization.admin_api_keys.delete( + "key_id", + ) + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.admin_api_keys.with_raw_response.delete( + "key_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = response.parse() + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.admin_api_keys.with_streaming_response.delete( + "key_id", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = response.parse() + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + client.organization.admin_api_keys.with_raw_response.delete( + "", + ) + + +class TestAsyncAdminAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + admin_api_key = await async_client.organization.admin_api_keys.create( + name="New Admin Key", + ) + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.admin_api_keys.with_raw_response.create( + name="New Admin Key", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.admin_api_keys.with_streaming_response.create( + name="New Admin Key", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + admin_api_key = await async_client.organization.admin_api_keys.retrieve( + "key_id", + ) + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.admin_api_keys.with_raw_response.retrieve( + "key_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.admin_api_keys.with_streaming_response.retrieve( + "key_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKey, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + await async_client.organization.admin_api_keys.with_raw_response.retrieve( + "", 
+ ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + admin_api_key = await async_client.organization.admin_api_keys.list() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + admin_api_key = await async_client.organization.admin_api_keys.list( + after="after", + limit=0, + order="asc", + ) + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.admin_api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.admin_api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + admin_api_key = await async_client.organization.admin_api_keys.delete( + "key_id", + ) + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.admin_api_keys.with_raw_response.delete( + "key_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.admin_api_keys.with_streaming_response.delete( + "key_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + admin_api_key = await response.parse() + assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): + await async_client.organization.admin_api_keys.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/organization/test_invites.py b/tests/api_resources/organization/test_invites.py new file mode 100644 index 00000000..73528d26 --- /dev/null +++ b/tests/api_resources/organization/test_invites.py @@ -0,0 +1,372 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization import ( + Invite, + InviteListResponse, + InviteDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestInvites: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + invite = client.organization.invites.create( + email="email", + role="reader", + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + invite = client.organization.invites.create( + email="email", + role="reader", + projects=[ + { + "id": "id", + "role": "member", + } + ], + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.invites.with_raw_response.create( + email="email", + role="reader", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.invites.with_streaming_response.create( + email="email", + role="reader", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + invite = client.organization.invites.retrieve( + "invite_id", + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.invites.with_raw_response.retrieve( + "invite_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.invites.with_streaming_response.retrieve( + "invite_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"): + client.organization.invites.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + invite = 
client.organization.invites.list() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + invite = client.organization.invites.list( + after="after", + limit=0, + ) + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.invites.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = response.parse() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.invites.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = response.parse() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + invite = client.organization.invites.delete( + "invite_id", + ) + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.invites.with_raw_response.delete( + "invite_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = response.parse() + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.invites.with_streaming_response.delete( + "invite_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = response.parse() + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"): + client.organization.invites.with_raw_response.delete( + "", + ) + + +class TestAsyncInvites: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.create( + email="email", + role="reader", + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.create( + email="email", + role="reader", + projects=[ + { + "id": "id", + "role": "member", + } + ], + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = 
await async_client.organization.invites.with_raw_response.create( + email="email", + role="reader", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = await response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.invites.with_streaming_response.create( + email="email", + role="reader", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = await response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.retrieve( + "invite_id", + ) + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.invites.with_raw_response.retrieve( + "invite_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = await response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.invites.with_streaming_response.retrieve( + "invite_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = await response.parse() + assert_matches_type(Invite, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"): + await async_client.organization.invites.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.list() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.list( + after="after", + limit=0, + ) + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.invites.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = await response.parse() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.invites.with_streaming_response.list() as response: + assert 
not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = await response.parse() + assert_matches_type(InviteListResponse, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + invite = await async_client.organization.invites.delete( + "invite_id", + ) + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.invites.with_raw_response.delete( + "invite_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + invite = await response.parse() + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.invites.with_streaming_response.delete( + "invite_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + invite = await response.parse() + assert_matches_type(InviteDeleteResponse, invite, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"): + await async_client.organization.invites.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/organization/test_projects.py b/tests/api_resources/organization/test_projects.py new file mode 100644 index 00000000..6b9dd9a4 --- /dev/null +++ b/tests/api_resources/organization/test_projects.py @@ -0,0 +1,429 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization import ( + Project, + ProjectListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestProjects: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.create( + name="name", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.with_raw_response.create( + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.with_streaming_response.create( + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.retrieve( + "project_id", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.with_raw_response.retrieve( + "project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.with_streaming_response.retrieve( + "project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.update( + project_id="project_id", + name="name", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.with_raw_response.update( + project_id="project_id", + name="name", + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.with_streaming_response.update( + project_id="project_id", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.with_raw_response.update( + project_id="", + name="name", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.list() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.list( + after="after", + include_archived=True, + limit=0, + ) + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_archive(self, client: DigitaloceanGenaiSDK) -> None: + project = client.organization.projects.archive( + "project_id", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_archive(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.projects.with_raw_response.archive( + "project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_archive(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.projects.with_streaming_response.archive( + "project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_archive(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.organization.projects.with_raw_response.archive( + "", + ) + + +class TestAsyncProjects: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.create( + name="name", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.with_raw_response.create( + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.with_streaming_response.create( + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.retrieve( + "project_id", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.with_raw_response.retrieve( + "project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.with_streaming_response.retrieve( + "project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.update( + project_id="project_id", + name="name", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + 
response = await async_client.organization.projects.with_raw_response.update( + project_id="project_id", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.with_streaming_response.update( + project_id="project_id", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.with_raw_response.update( + project_id="", + name="name", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.list() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.list( + after="after", + include_archived=True, + limit=0, + ) + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.projects.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(ProjectListResponse, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + project = await async_client.organization.projects.archive( + "project_id", + ) + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.projects.with_raw_response.archive( + "project_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + 
async with async_client.organization.projects.with_streaming_response.archive( + "project_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(Project, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.organization.projects.with_raw_response.archive( + "", + ) diff --git a/tests/api_resources/organization/test_usage.py b/tests/api_resources/organization/test_usage.py new file mode 100644 index 00000000..198f2159 --- /dev/null +++ b/tests/api_resources/organization/test_usage.py @@ -0,0 +1,834 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import UsageResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestUsage: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.audio_speeches( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_audio_speeches_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.audio_speeches( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.audio_speeches( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.audio_speeches( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.audio_transcriptions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_audio_transcriptions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = 
client.organization.usage.audio_transcriptions( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.audio_transcriptions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.audio_transcriptions( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.code_interpreter_sessions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_code_interpreter_sessions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.code_interpreter_sessions( + start_time=0, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.code_interpreter_sessions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.code_interpreter_sessions( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_completions(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.completions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_completions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.completions( + start_time=0, + api_key_ids=["string"], + batch=True, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_completions(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.completions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_completions(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.completions( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_embeddings(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.embeddings( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_embeddings_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.embeddings( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.embeddings( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.embeddings( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_images(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.images( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_images_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.images( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + sizes=["256x256"], + sources=["image.generation"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_images(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.images( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + 
assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_images(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.images( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_moderations(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.moderations( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_moderations_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.moderations( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_moderations(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.moderations( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_moderations(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.moderations( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.vector_stores( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_vector_stores_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + usage = client.organization.usage.vector_stores( + start_time=0, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.usage.with_raw_response.vector_stores( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.usage.with_streaming_response.vector_stores( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = response.parse() + 
assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncUsage: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.audio_speeches( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_audio_speeches_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.audio_speeches( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.audio_speeches( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.audio_speeches( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.audio_transcriptions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_audio_transcriptions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.audio_transcriptions( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.audio_transcriptions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.audio_transcriptions( + start_time=0, + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.code_interpreter_sessions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_code_interpreter_sessions_with_all_params( + self, async_client: AsyncDigitaloceanGenaiSDK + ) -> None: + usage = await async_client.organization.usage.code_interpreter_sessions( + start_time=0, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.code_interpreter_sessions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.code_interpreter_sessions( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.completions( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_completions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.completions( + start_time=0, + api_key_ids=["string"], + batch=True, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.completions( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.completions( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + 
usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.embeddings( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_embeddings_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.embeddings( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.embeddings( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.embeddings( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.images( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_images_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.images( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + sizes=["256x256"], + sources=["image.generation"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.images( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.images( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.moderations( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_moderations_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.moderations( + start_time=0, + api_key_ids=["string"], + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + models=["string"], + page="page", + project_ids=["string"], + user_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.moderations( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.moderations( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.vector_stores( + start_time=0, + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_vector_stores_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + usage = await async_client.organization.usage.vector_stores( + start_time=0, + bucket_width="1m", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.usage.with_raw_response.vector_stores( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.usage.with_streaming_response.vector_stores( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + usage = await response.parse() + assert_matches_type(UsageResponse, usage, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/organization/test_users.py b/tests/api_resources/organization/test_users.py new file mode 100644 index 00000000..b40fcbef --- 
/dev/null +++ b/tests/api_resources/organization/test_users.py @@ -0,0 +1,362 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.organization import ( + OrganizationUser, + UserListResponse, + UserDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestUsers: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.users.retrieve( + "user_id", + ) + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.users.with_raw_response.retrieve( + "user_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.users.with_streaming_response.retrieve( + "user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + client.organization.users.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.users.update( + user_id="user_id", + role="owner", + ) + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.users.with_raw_response.update( + user_id="user_id", + role="owner", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.users.with_streaming_response.update( + user_id="user_id", + role="owner", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + 
client.organization.users.with_raw_response.update( + user_id="", + role="owner", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.users.list() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.users.list( + after="after", + emails=["string"], + limit=0, + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.users.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.users.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + user = client.organization.users.delete( + "user_id", + ) + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.users.with_raw_response.delete( + "user_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.users.with_streaming_response.delete( + "user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + client.organization.users.with_raw_response.delete( + "", + ) + + +class TestAsyncUsers: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.users.retrieve( + "user_id", + ) + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.users.with_raw_response.retrieve( + "user_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = 
await response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.users.with_streaming_response.retrieve( + "user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.users.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.users.update( + user_id="user_id", + role="owner", + ) + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.users.with_raw_response.update( + user_id="user_id", + role="owner", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.users.with_streaming_response.update( + user_id="user_id", + role="owner", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(OrganizationUser, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.users.with_raw_response.update( + user_id="", + role="owner", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.users.list() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.users.list( + after="after", + emails=["string"], + limit=0, + ) + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.users.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.users.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(UserListResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + user = await async_client.organization.users.delete( + "user_id", + ) + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.users.with_raw_response.delete( + "user_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + user = await response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.users.with_streaming_response.delete( + "user_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + user = await response.parse() + assert_matches_type(UserDeleteResponse, user, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): + await async_client.organization.users.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_assistants.py b/tests/api_resources/test_assistants.py new file mode 100644 index 00000000..a5fa998d --- /dev/null +++ b/tests/api_resources/test_assistants.py @@ -0,0 +1,528 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
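+# Editor's note: the generated tests below exercise each endpoint through three
+# access patterns: a direct call, `.with_raw_response` (parse after inspecting
+# the HTTP response), and `.with_streaming_response` (context-managed, closed on
+# exit). A minimal usage sketch follows, kept as comments so the patch's hunk
+# structure is untouched. The constructor arguments shown (base_url pointing at
+# the mock server these tests default to) are an assumption for illustration,
+# not taken from this patch:
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(base_url="http://127.0.0.1:4010")  # assumed kwarg
+#
+#     # Direct call returns the parsed model.
+#     assistant = client.assistants.create(model="gpt-4o")
+#
+#     # Raw-response access exposes the underlying HTTP exchange before parsing.
+#     raw = client.assistants.with_raw_response.retrieve(assistant.id)
+#     assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
+#     assistant = raw.parse()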
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + AssistantObject, + AssistantListResponse, + AssistantDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAssistants: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.create( + model="gpt-4o", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.create( + model="gpt-4o", + description="description", + instructions="instructions", + metadata={"foo": "string"}, + name="name", + reasoning_effort="low", + response_format="auto", + temperature=1, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.assistants.with_raw_response.create( + model="gpt-4o", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.assistants.with_streaming_response.create( + model="gpt-4o", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.retrieve( + "assistant_id", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.assistants.with_raw_response.retrieve( + "assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.assistants.with_streaming_response.retrieve( + "assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + client.assistants.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.update( + assistant_id="assistant_id", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.update( + assistant_id="assistant_id", + description="description", + instructions="instructions", + metadata={"foo": "string"}, + model="string", + name="name", + reasoning_effort="low", + response_format="auto", + temperature=1, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.assistants.with_raw_response.update( + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.assistants.with_streaming_response.update( + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + client.assistants.with_raw_response.update( + assistant_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.list() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.list( + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.assistants.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = response.parse() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.assistants.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + + assistant = response.parse() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + assistant = client.assistants.delete( + "assistant_id", + ) + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.assistants.with_raw_response.delete( + "assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = response.parse() + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.assistants.with_streaming_response.delete( + "assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = response.parse() + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + client.assistants.with_raw_response.delete( + "", + ) + + +class TestAsyncAssistants: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.create( + model="gpt-4o", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.create( + model="gpt-4o", + description="description", + instructions="instructions", + metadata={"foo": "string"}, + name="name", + reasoning_effort="low", + response_format="auto", + temperature=1, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.assistants.with_raw_response.create( + model="gpt-4o", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = await response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.assistants.with_streaming_response.create( + model="gpt-4o", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + + assistant = await response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.retrieve( + "assistant_id", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.assistants.with_raw_response.retrieve( + "assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = await response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.assistants.with_streaming_response.retrieve( + "assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = await response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + await async_client.assistants.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.update( + assistant_id="assistant_id", + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.update( + assistant_id="assistant_id", + description="description", + instructions="instructions", + metadata={"foo": "string"}, + model="string", + name="name", + reasoning_effort="low", + response_format="auto", + temperature=1, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + ) + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.assistants.with_raw_response.update( + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = await response.parse() + assert_matches_type(AssistantObject, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.assistants.with_streaming_response.update( + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = await response.parse() + assert_matches_type(AssistantObject, 
assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + await async_client.assistants.with_raw_response.update( + assistant_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.list() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.list( + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.assistants.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = await response.parse() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.assistants.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = await response.parse() + assert_matches_type(AssistantListResponse, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + assistant = await async_client.assistants.delete( + "assistant_id", + ) + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.assistants.with_raw_response.delete( + "assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + assistant = await response.parse() + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.assistants.with_streaming_response.delete( + "assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + assistant = await response.parse() + assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): + await async_client.assistants.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_audio.py b/tests/api_resources/test_audio.py new file mode 100644 index 
00000000..e71d568e --- /dev/null +++ b/tests/api_resources/test_audio.py @@ -0,0 +1,383 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import httpx +import pytest +from respx import MockRouter + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + AudioTranslateAudioResponse, + AudioTranscribeAudioResponse, +) +from digitalocean_genai_sdk._response import ( + BinaryAPIResponse, + AsyncBinaryAPIResponse, + StreamedBinaryAPIResponse, + AsyncStreamedBinaryAPIResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAudio: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + audio = client.audio.generate_speech( + input="input", + model="string", + voice="ash", + ) + assert audio.is_closed + assert audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, BinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_method_generate_speech_with_all_params(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + audio = client.audio.generate_speech( + input="input", + model="string", + voice="ash", + instructions="instructions", + response_format="mp3", + speed=0.25, + ) + assert audio.is_closed + assert audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, BinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_raw_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + audio = client.audio.with_raw_response.generate_speech( + input="input", + model="string", + voice="ash", + ) + + assert audio.is_closed is True + assert audio.http_request.headers.get("X-Stainless-Lang") == "python" + assert audio.json() == {"foo": "bar"} + assert isinstance(audio, BinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + def test_streaming_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + with client.audio.with_streaming_response.generate_speech( + input="input", + model="string", + voice="ash", + ) as audio: + assert not audio.is_closed + assert audio.http_request.headers.get("X-Stainless-Lang") == "python" + + assert audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, StreamedBinaryAPIResponse) + + assert cast(Any, audio.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: + audio = client.audio.transcribe_audio( + file=b"raw file contents", + 
model="gpt-4o-transcribe", + ) + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_transcribe_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + audio = client.audio.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + include=["logprobs"], + language="language", + prompt="prompt", + response_format="json", + stream=True, + temperature=0, + timestamp_granularities=["word"], + ) + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: + response = client.audio.with_raw_response.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + audio = response.parse() + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: + with client.audio.with_streaming_response.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + audio = response.parse() + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: + audio = client.audio.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_translate_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + audio = client.audio.translate_audio( + file=b"raw file contents", + model="whisper-1", + prompt="prompt", + response_format="json", + temperature=0, + ) + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: + response = client.audio.with_raw_response.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + audio = response.parse() + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: + with client.audio.with_streaming_response.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + audio = response.parse() + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncAudio: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_method_generate_speech( + self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter + ) -> 
None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + audio = await async_client.audio.generate_speech( + input="input", + model="string", + voice="ash", + ) + assert audio.is_closed + assert await audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, AsyncBinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_method_generate_speech_with_all_params( + self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter + ) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + audio = await async_client.audio.generate_speech( + input="input", + model="string", + voice="ash", + instructions="instructions", + response_format="mp3", + speed=0.25, + ) + assert audio.is_closed + assert await audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, AsyncBinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_raw_response_generate_speech( + self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter + ) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + audio = await async_client.audio.with_raw_response.generate_speech( + input="input", + model="string", + voice="ash", + ) + + assert audio.is_closed is True + assert audio.http_request.headers.get("X-Stainless-Lang") == "python" + assert await audio.json() == {"foo": "bar"} + assert isinstance(audio, AsyncBinaryAPIResponse) + + @pytest.mark.skip() + @parametrize + @pytest.mark.respx(base_url=base_url) + async def test_streaming_response_generate_speech( + self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter + ) -> None: + respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + async with async_client.audio.with_streaming_response.generate_speech( + input="input", + model="string", + voice="ash", + ) as audio: + assert not audio.is_closed + assert audio.http_request.headers.get("X-Stainless-Lang") == "python" + + assert await audio.json() == {"foo": "bar"} + assert cast(Any, audio.is_closed) is True + assert isinstance(audio, AsyncStreamedBinaryAPIResponse) + + assert cast(Any, audio.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + audio = await async_client.audio.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + ) + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_transcribe_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + audio = await async_client.audio.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + include=["logprobs"], + language="language", + prompt="prompt", + response_format="json", + stream=True, + temperature=0, + timestamp_granularities=["word"], + ) + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.audio.with_raw_response.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + audio = await response.parse() + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.audio.with_streaming_response.transcribe_audio( + file=b"raw file contents", + model="gpt-4o-transcribe", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + audio = await response.parse() + assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + audio = await async_client.audio.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_translate_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + audio = await async_client.audio.translate_audio( + file=b"raw file contents", + model="whisper-1", + prompt="prompt", + response_format="json", + temperature=0, + ) + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.audio.with_raw_response.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + audio = await response.parse() + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.audio.with_streaming_response.translate_audio( + file=b"raw file contents", + model="whisper-1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + audio = await response.parse() + assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py new file mode 100644 index 00000000..6ad0bbee --- /dev/null +++ b/tests/api_resources/test_batches.py @@ -0,0 +1,366 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
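+#
+# A brief orientation for readers of these generated tests: each batches
+# endpoint is exercised four ways -- a bare method call, a call passing every
+# optional parameter, a `with_raw_response` variant that inspects headers and
+# then calls `.parse()`, and a `with_streaming_response` context manager whose
+# exit must close the underlying HTTP response. The `client` fixture is
+# parametrized as loose/strict, and requests target TEST_API_BASE_URL, which
+# defaults to a local mock server at http://127.0.0.1:4010 (presumably the one
+# started by scripts/mock; that mapping is an assumption, not asserted here).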
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import Batch, BatchListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestBatches: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + batch = client.batches.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + batch = client.batches.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + metadata={"foo": "string"}, + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.batches.with_raw_response.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.batches.with_streaming_response.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + batch = client.batches.retrieve( + "batch_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.batches.with_raw_response.retrieve( + "batch_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.batches.with_streaming_response.retrieve( + "batch_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + client.batches.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + batch = 
client.batches.list() + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + batch = client.batches.list( + after="after", + limit=0, + ) + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.batches.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = response.parse() + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.batches.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = response.parse() + assert_matches_type(BatchListResponse, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: + batch = client.batches.cancel( + "batch_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.batches.with_raw_response.cancel( + "batch_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.batches.with_streaming_response.cancel( + "batch_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + client.batches.with_raw_response.cancel( + "", + ) + + +class TestAsyncBatches: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + metadata={"foo": "string"}, + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.batches.with_raw_response.create( + completion_window="24h", + endpoint="/v1/responses", + 
input_file_id="input_file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.batches.with_streaming_response.create( + completion_window="24h", + endpoint="/v1/responses", + input_file_id="input_file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.retrieve( + "batch_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.batches.with_raw_response.retrieve( + "batch_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.batches.with_streaming_response.retrieve( + "batch_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + await async_client.batches.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.list() + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.list( + after="after", + limit=0, + ) + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.batches.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = await response.parse() + assert_matches_type(BatchListResponse, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.batches.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = await response.parse() + 
assert_matches_type(BatchListResponse, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + batch = await async_client.batches.cancel( + "batch_id", + ) + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.batches.with_raw_response.cancel( + "batch_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.batches.with_streaming_response.cancel( + "batch_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + batch = await response.parse() + assert_matches_type(Batch, batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + await async_client.batches.with_raw_response.cancel( + "", + ) diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py new file mode 100644 index 00000000..eb5c1abd --- /dev/null +++ b/tests/api_resources/test_completions.py @@ -0,0 +1,148 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
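+#
+# Sketch of the call pattern these tests exercise, for orientation only (the
+# exact constructor arguments, e.g. api_key handling, are assumptions and are
+# not asserted by the tests below):
+#
+#   client = DigitaloceanGenaiSDK()
+#   completion = client.completions.create(
+#       model="string",
+#       prompt="This is a test.",
+#   )
+#
+# `with_raw_response.create(...)` returns a wrapper whose `.parse()` yields the
+# same CompletionCreateResponse, which is why both variants share assertions.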
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + CompletionCreateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCompletions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.completions.create( + model="string", + prompt="This is a test.", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + completion = client.completions.create( + model="string", + prompt="This is a test.", + best_of=0, + echo=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=0, + max_tokens=16, + n=1, + presence_penalty=-2, + seed=0, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + suffix="test.", + temperature=1, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.completions.with_raw_response.create( + model="string", + prompt="This is a test.", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.completions.with_streaming_response.create( + model="string", + prompt="This is a test.", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncCompletions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.completions.create( + model="string", + prompt="This is a test.", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + completion = await async_client.completions.create( + model="string", + prompt="This is a test.", + best_of=0, + echo=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=0, + max_tokens=16, + n=1, + presence_penalty=-2, + seed=0, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + suffix="test.", + temperature=1, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.completions.with_raw_response.create( + model="string", + prompt="This is a test.", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.completions.with_streaming_response.create( + model="string", + prompt="This is a test.", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py new file mode 100644 index 00000000..bd3ef322 --- /dev/null +++ b/tests/api_resources/test_embeddings.py @@ -0,0 +1,120 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import EmbeddingCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEmbeddings: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + embedding = client.embeddings.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + embedding = client.embeddings.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + dimensions=1, + encoding_format="float", + user="user-1234", + ) + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.embeddings.with_raw_response.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + embedding = response.parse() + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.embeddings.with_streaming_response.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + embedding = response.parse() + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncEmbeddings: + parametrize = pytest.mark.parametrize("async_client", [False, True], 
indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + embedding = await async_client.embeddings.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + embedding = await async_client.embeddings.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + dimensions=1, + encoding_format="float", + user="user-1234", + ) + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.embeddings.with_raw_response.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + embedding = await response.parse() + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.embeddings.with_streaming_response.create( + input="The quick brown fox jumped over the lazy dog", + model="text-embedding-3-small", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + embedding = await response.parse() + assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py new file mode 100644 index 00000000..b30ae859 --- /dev/null +++ b/tests/api_resources/test_files.py @@ -0,0 +1,430 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
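+#
+# Note for readers: `files.upload` is called with raw bytes for `file` in the
+# tests below; whether file handles or (filename, contents) tuples are also
+# accepted depends on the SDK's multipart helpers and is an assumption not
+# verified here. `retrieve_content` is asserted against `str`, i.e. the raw
+# response body rather than a typed model.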
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + OpenAIFile, + FileListResponse, + FileDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFiles: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.retrieve( + "file_id", + ) + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.files.with_raw_response.retrieve( + "file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.files.with_streaming_response.retrieve( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.list() + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.list( + after="after", + limit=0, + order="asc", + purpose="purpose", + ) + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.files.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.files.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileListResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.delete( + "file_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.files.with_raw_response.delete( + "file_id", + ) + + 
assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.files.with_streaming_response.delete( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.retrieve_content( + "file_id", + ) + assert_matches_type(str, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + response = client.files.with_raw_response.retrieve_content( + "file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(str, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + with client.files.with_streaming_response.retrieve_content( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(str, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.files.with_raw_response.retrieve_content( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_upload(self, client: DigitaloceanGenaiSDK) -> None: + file = client.files.upload( + file=b"raw file contents", + purpose="assistants", + ) + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_upload(self, client: DigitaloceanGenaiSDK) -> None: + response = client.files.with_raw_response.upload( + file=b"raw file contents", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_upload(self, client: DigitaloceanGenaiSDK) -> None: + with client.files.with_streaming_response.upload( + file=b"raw file contents", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncFiles: + parametrize = pytest.mark.parametrize("async_client", [False, True], 
indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.retrieve( + "file_id", + ) + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.files.with_raw_response.retrieve( + "file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.files.with_streaming_response.retrieve( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.list() + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.list( + after="after", + limit=0, + order="asc", + purpose="purpose", + ) + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.files.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileListResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.files.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileListResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.delete( + "file_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.files.with_raw_response.delete( + "file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + 
assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.files.with_streaming_response.delete( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.retrieve_content( + "file_id", + ) + assert_matches_type(str, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.files.with_raw_response.retrieve_content( + "file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(str, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.files.with_streaming_response.retrieve_content( + "file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(str, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.files.with_raw_response.retrieve_content( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.files.upload( + file=b"raw file contents", + purpose="assistants", + ) + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.files.with_raw_response.upload( + file=b"raw file contents", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(OpenAIFile, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.files.with_streaming_response.upload( + file=b"raw file contents", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(OpenAIFile, file, 
path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py new file mode 100644 index 00000000..380a0759 --- /dev/null +++ b/tests/api_resources/test_images.py @@ -0,0 +1,320 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + ImagesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestImages: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_edit(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_edit_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + mask=b"raw file contents", + model="dall-e-2", + n=1, + response_format="url", + size="1024x1024", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None: + response = client.images.with_raw_response.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None: + with client.images.with_streaming_response.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_generation(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_generation( + prompt="A cute baby sea otter", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_generation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_generation( + prompt="A cute baby sea otter", + model="dall-e-3", + n=1, + quality="standard", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None: + response = client.images.with_raw_response.create_generation( + prompt="A cute baby sea otter", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None: + with client.images.with_streaming_response.create_generation( + prompt="A cute baby sea otter", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_variation(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_variation( + image=b"raw file contents", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_variation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + image = client.images.create_variation( + image=b"raw file contents", + model="dall-e-2", + n=1, + response_format="url", + size="1024x1024", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None: + response = client.images.with_raw_response.create_variation( + image=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None: + with client.images.with_streaming_response.create_variation( + image=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncImages: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_edit_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + mask=b"raw file contents", + model="dall-e-2", + n=1, + response_format="url", + size="1024x1024", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.images.with_raw_response.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await 
response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.images.with_streaming_response.create_edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_generation( + prompt="A cute baby sea otter", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_generation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_generation( + prompt="A cute baby sea otter", + model="dall-e-3", + n=1, + quality="standard", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.images.with_raw_response.create_generation( + prompt="A cute baby sea otter", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.images.with_streaming_response.create_generation( + prompt="A cute baby sea otter", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_variation( + image=b"raw file contents", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_variation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + image = await async_client.images.create_variation( + image=b"raw file contents", + model="dall-e-2", + n=1, + response_format="url", + size="1024x1024", + user="user-1234", + ) + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.images.with_raw_response.create_variation( + image=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + @pytest.mark.skip() + 
@parametrize + async def test_streaming_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.images.with_streaming_response.create_variation( + image=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImagesResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py new file mode 100644 index 00000000..aa215415 --- /dev/null +++ b/tests/api_resources/test_models.py @@ -0,0 +1,246 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + model = client.models.retrieve( + "gpt-4o-mini", + ) + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.models.with_raw_response.retrieve( + "gpt-4o-mini", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.models.with_streaming_response.retrieve( + "gpt-4o-mini", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(Model, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + model = client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + 
assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + model = client.models.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.models.with_raw_response.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.models.with_streaming_response.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.models.with_raw_response.delete( + "", + ) + + +class TestAsyncModels: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + model = await async_client.models.retrieve( + "gpt-4o-mini", + ) + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.models.with_raw_response.retrieve( + "gpt-4o-mini", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.models.with_streaming_response.retrieve( + "gpt-4o-mini", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(Model, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + model = await async_client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + model = await async_client.models.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.models.with_raw_response.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.models.with_streaming_response.delete( + "ft:gpt-4o-mini:acemeco:suffix:abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelDeleteResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.models.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py new file mode 100644 index 00000000..79d34625 --- /dev/null +++ b/tests/api_resources/test_moderations.py @@ -0,0 +1,108 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
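+# NOTE: the tests in this file follow the same generated pattern as the other
+# resources: a plain method call, a `.with_raw_response` variant (headers are
+# checked, then `parse()` yields the typed model), and a `.with_streaming_response`
+# variant (parsed inside the context manager, then confirmed closed on exit).
+# All cases are marked `@pytest.mark.skip()` in this initial commit.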
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ModerationClassifyResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModerations: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_classify(self, client: DigitaloceanGenaiSDK) -> None: + moderation = client.moderations.classify( + input="I want to kill them.", + ) + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_classify_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + moderation = client.moderations.classify( + input="I want to kill them.", + model="omni-moderation-2024-09-26", + ) + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_classify(self, client: DigitaloceanGenaiSDK) -> None: + response = client.moderations.with_raw_response.classify( + input="I want to kill them.", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + moderation = response.parse() + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_classify(self, client: DigitaloceanGenaiSDK) -> None: + with client.moderations.with_streaming_response.classify( + input="I want to kill them.", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + moderation = response.parse() + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModerations: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + moderation = await async_client.moderations.classify( + input="I want to kill them.", + ) + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_classify_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + moderation = await async_client.moderations.classify( + input="I want to kill them.", + model="omni-moderation-2024-09-26", + ) + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.moderations.with_raw_response.classify( + input="I want to kill them.", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + moderation = await response.parse() + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.moderations.with_streaming_response.classify( + input="I want to 
kill them.", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + moderation = await response.parse() + assert_matches_type(ModerationClassifyResponse, moderation, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_organization.py b/tests/api_resources/test_organization.py new file mode 100644 index 00000000..844ed287 --- /dev/null +++ b/tests/api_resources/test_organization.py @@ -0,0 +1,219 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + UsageResponse, + OrganizationListAuditLogsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestOrganization: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_get_costs(self, client: DigitaloceanGenaiSDK) -> None: + organization = client.organization.get_costs( + start_time=0, + ) + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_get_costs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + organization = client.organization.get_costs( + start_time=0, + bucket_width="1d", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.with_raw_response.get_costs( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + organization = response.parse() + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.with_streaming_response.get_costs( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + organization = response.parse() + assert_matches_type(UsageResponse, organization, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None: + organization = client.organization.list_audit_logs() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_audit_logs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + organization = client.organization.list_audit_logs( + actor_emails=["string"], + actor_ids=["string"], + after="after", + before="before", + effective_at={ + "gt": 0, + "gte": 0, + "lt": 0, + "lte": 0, + }, + event_types=["api_key.created"], + limit=0, + project_ids=["string"], + resource_ids=["string"], + ) + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None: + response = client.organization.with_raw_response.list_audit_logs() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + organization = response.parse() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None: + with client.organization.with_streaming_response.list_audit_logs() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + organization = response.parse() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncOrganization: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + organization = await async_client.organization.get_costs( + start_time=0, + ) + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_get_costs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + organization = await async_client.organization.get_costs( + start_time=0, + bucket_width="1d", + end_time=0, + group_by=["project_id"], + limit=0, + page="page", + project_ids=["string"], + ) + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.with_raw_response.get_costs( + start_time=0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + organization = await response.parse() + assert_matches_type(UsageResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.with_streaming_response.get_costs( + start_time=0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + organization = await response.parse() + assert_matches_type(UsageResponse, organization, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + organization = await async_client.organization.list_audit_logs() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_audit_logs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + organization = await async_client.organization.list_audit_logs( + actor_emails=["string"], + actor_ids=["string"], + after="after", + before="before", + effective_at={ + "gt": 0, + "gte": 0, + "lt": 0, + "lte": 0, + }, + event_types=["api_key.created"], + limit=0, + project_ids=["string"], + resource_ids=["string"], + ) + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + 
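+ # The raw-response variant below differs from the sync version above only in
+ # that `response.parse()` is awaited on the async client.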
@pytest.mark.skip() + @parametrize + async def test_raw_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.organization.with_raw_response.list_audit_logs() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + organization = await response.parse() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.organization.with_streaming_response.list_audit_logs() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + organization = await response.parse() + assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_realtime.py b/tests/api_resources/test_realtime.py new file mode 100644 index 00000000..15797ff9 --- /dev/null +++ b/tests/api_resources/test_realtime.py @@ -0,0 +1,269 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + RealtimeCreateSessionResponse, + RealtimeCreateTranscriptionSessionResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRealtime: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_session(self, client: DigitaloceanGenaiSDK) -> None: + realtime = client.realtime.create_session() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + realtime = client.realtime.create_session( + input_audio_format="pcm16", + input_audio_noise_reduction={"type": "near_field"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, + instructions="instructions", + max_response_output_tokens=0, + modalities=["text"], + model="gpt-4o-realtime-preview", + output_audio_format="pcm16", + temperature=0, + tool_choice="tool_choice", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + ], + turn_detection={ + "create_response": True, + "eagerness": "low", + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "server_vad", + }, + voice="ash", + ) + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_session(self, client: DigitaloceanGenaiSDK) -> None: + response = client.realtime.with_raw_response.create_session() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + realtime = response.parse() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_create_session(self, client: DigitaloceanGenaiSDK) -> None: + with client.realtime.with_streaming_response.create_session() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + realtime = response.parse() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None: + realtime = client.realtime.create_transcription_session() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_transcription_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + realtime = client.realtime.create_transcription_session( + include=["string"], + input_audio_format="pcm16", + input_audio_noise_reduction={"type": "near_field"}, + input_audio_transcription={ + "language": "language", + "model": "gpt-4o-transcribe", + "prompt": "prompt", + }, + modalities=["text"], + turn_detection={ + "create_response": True, + "eagerness": "low", + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "server_vad", + }, + ) + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None: + response = client.realtime.with_raw_response.create_transcription_session() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + realtime = response.parse() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None: + with client.realtime.with_streaming_response.create_transcription_session() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + realtime = response.parse() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncRealtime: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + realtime = await async_client.realtime.create_session() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_session_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + realtime = await async_client.realtime.create_session( + input_audio_format="pcm16", + input_audio_noise_reduction={"type": "near_field"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, + instructions="instructions", + max_response_output_tokens=0, + modalities=["text"], + model="gpt-4o-realtime-preview", + output_audio_format="pcm16", + temperature=0, + tool_choice="tool_choice", + tools=[ + { + "description": "description", + "name": "name", + "parameters": {}, + "type": "function", + } + 
], + turn_detection={ + "create_response": True, + "eagerness": "low", + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "server_vad", + }, + voice="ash", + ) + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.realtime.with_raw_response.create_session() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + realtime = await response.parse() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.realtime.with_streaming_response.create_session() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + realtime = await response.parse() + assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + realtime = await async_client.realtime.create_transcription_session() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_transcription_session_with_all_params( + self, async_client: AsyncDigitaloceanGenaiSDK + ) -> None: + realtime = await async_client.realtime.create_transcription_session( + include=["string"], + input_audio_format="pcm16", + input_audio_noise_reduction={"type": "near_field"}, + input_audio_transcription={ + "language": "language", + "model": "gpt-4o-transcribe", + "prompt": "prompt", + }, + modalities=["text"], + turn_detection={ + "create_response": True, + "eagerness": "low", + "interrupt_response": True, + "prefix_padding_ms": 0, + "silence_duration_ms": 0, + "threshold": 0, + "type": "server_vad", + }, + ) + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.realtime.with_raw_response.create_transcription_session() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + realtime = await response.parse() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_transcription_session( + self, async_client: AsyncDigitaloceanGenaiSDK + ) -> None: + async with async_client.realtime.with_streaming_response.create_transcription_session() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + realtime = await response.parse() + assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py new file mode 100644 index 00000000..4bd7e367 --- 
/dev/null +++ b/tests/api_resources/test_responses.py @@ -0,0 +1,479 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + Response, + ResponseListInputItemsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestResponses: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.create( + input="string", + model="gpt-4o", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.create( + input="string", + model="gpt-4o", + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + stream=True, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + http_response = client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + include=["file_search_call.results"], + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + http_response = 
client.responses.with_raw_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.responses.with_streaming_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.with_raw_response.retrieve( + response_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + assert response is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + http_response = client.responses.with_raw_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + assert response is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.responses.with_streaming_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert response is None + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_input_items(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.list_input_items( + response_id="response_id", + ) + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_input_items_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + response = client.responses.list_input_items( + response_id="response_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None: + http_response = client.responses.with_raw_response.list_input_items( + response_id="response_id", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = http_response.parse() + 
assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None: + with client.responses.with_streaming_response.list_input_items( + response_id="response_id", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = http_response.parse() + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_input_items(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + client.responses.with_raw_response.list_input_items( + response_id="", + ) + + +class TestAsyncResponses: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.create( + input="string", + model="gpt-4o", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.create( + input="string", + model="gpt-4o", + include=["file_search_call.results"], + instructions="instructions", + max_output_tokens=0, + metadata={"foo": "string"}, + parallel_tool_calls=True, + previous_response_id="previous_response_id", + reasoning={ + "effort": "low", + "generate_summary": "concise", + }, + store=True, + stream=True, + temperature=1, + text={"format": {"type": "text"}}, + tool_choice="none", + tools=[ + { + "type": "file_search", + "vector_store_ids": ["string"], + "filters": { + "key": "key", + "type": "eq", + "value": "string", + }, + "max_num_results": 0, + "ranking_options": { + "ranker": "auto", + "score_threshold": 0, + }, + } + ], + top_p=1, + truncation="auto", + user="user-1234", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + http_response = await async_client.responses.with_raw_response.create( + input="string", + model="gpt-4o", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.responses.with_streaming_response.create( + input="string", + model="gpt-4o", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.retrieve( + 
response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + include=["file_search_call.results"], + ) + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + http_response = await async_client.responses.with_raw_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.responses.with_streaming_response.retrieve( + response_id="resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert_matches_type(Response, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + await async_client.responses.with_raw_response.retrieve( + response_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + assert response is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + http_response = await async_client.responses.with_raw_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = await http_response.parse() + assert response is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.responses.with_streaming_response.delete( + "resp_677efb5139a88190b512bc3fef8e535d", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert response is None + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + await async_client.responses.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.list_input_items( + 
response_id="response_id", + ) + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_input_items_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.responses.list_input_items( + response_id="response_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + http_response = await async_client.responses.with_raw_response.list_input_items( + response_id="response_id", + ) + + assert http_response.is_closed is True + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + response = await http_response.parse() + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.responses.with_streaming_response.list_input_items( + response_id="response_id", + ) as http_response: + assert not http_response.is_closed + assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" + + response = await http_response.parse() + assert_matches_type(ResponseListInputItemsResponse, response, path=["response"]) + + assert cast(Any, http_response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): + await async_client.responses.with_raw_response.list_input_items( + response_id="", + ) diff --git a/tests/api_resources/test_threads.py b/tests/api_resources/test_threads.py new file mode 100644 index 00000000..cca5e067 --- /dev/null +++ b/tests/api_resources/test_threads.py @@ -0,0 +1,424 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ThreadObject, ThreadDeleteResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestThreads: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.create() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.create( + messages=[ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": "string"}, + } + ], + metadata={"foo": "string"}, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.retrieve( + "thread_id", + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.with_raw_response.retrieve( + "thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.with_streaming_response.retrieve( + "thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + 
client.threads.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.update( + thread_id="thread_id", + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.update( + thread_id="thread_id", + metadata={"foo": "string"}, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.with_raw_response.update( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.with_streaming_response.update( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.with_raw_response.update( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + thread = client.threads.delete( + "thread_id", + ) + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.with_raw_response.delete( + "thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.with_streaming_response.delete( + "thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.with_raw_response.delete( + "", + ) + + +class TestAsyncThreads: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.create() + 
assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.create( + messages=[ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": "string"}, + } + ], + metadata={"foo": "string"}, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.retrieve( + "thread_id", + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.with_raw_response.retrieve( + "thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.with_streaming_response.retrieve( + "thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.update( + thread_id="thread_id", + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.update( + thread_id="thread_id", + metadata={"foo": "string"}, + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + ) + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.with_raw_response.update( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.with_streaming_response.update( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ThreadObject, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.with_raw_response.update( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + thread = await async_client.threads.delete( + "thread_id", + ) + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.with_raw_response.delete( + "thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + thread = await response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.with_streaming_response.delete( + "thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + thread = await response.parse() + assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py new file mode 100644 index 00000000..35f52730 --- /dev/null +++ b/tests/api_resources/test_uploads.py @@ -0,0 +1,399 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
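+#
+# These tests exercise the three access patterns the SDK exposes for every
+# endpoint: the plain resource method, `.with_raw_response` (which returns the
+# unparsed HTTP response; `.parse()` then yields the typed model), and
+# `.with_streaming_response` (a context manager that closes the response on
+# exit). All cases are skipped by default and expect a mock server listening
+# on TEST_API_BASE_URL. A minimal sketch of driving one call by hand, assuming
+# the client accepts `base_url` and `api_key` keyword arguments and that a
+# mock (e.g. one started via `scripts/mock`) is already listening:
+#
+#     client = DigitaloceanGenaiSDK(base_url=base_url, api_key="test")
+#     upload = client.uploads.create(
+#         bytes=0,
+#         filename="filename",
+#         mime_type="mime_type",
+#         purpose="assistants",
+#     )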
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + Upload, + UploadAddPartResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestUploads: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + upload = client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.uploads.with_raw_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.uploads.with_streaming_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_add_part(self, client: DigitaloceanGenaiSDK) -> None: + upload = client.uploads.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_add_part(self, client: DigitaloceanGenaiSDK) -> None: + response = client.uploads.with_raw_response.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add_part(self, client: DigitaloceanGenaiSDK) -> None: + with client.uploads.with_streaming_response.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add_part(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.with_raw_response.add_part( + upload_id="", + data=b"raw file contents", + ) + + @pytest.mark.skip() + @parametrize + def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: + upload = client.uploads.cancel( + "upload_abc123", + ) + assert_matches_type(Upload, upload, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.uploads.with_raw_response.cancel( + "upload_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.uploads.with_streaming_response.cancel( + "upload_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.with_raw_response.cancel( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_complete(self, client: DigitaloceanGenaiSDK) -> None: + upload = client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_complete_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + upload = client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string"], + md5="md5", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_complete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.uploads.with_raw_response.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_complete(self, client: DigitaloceanGenaiSDK) -> None: + with client.uploads.with_streaming_response.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_complete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + client.uploads.with_raw_response.complete( + upload_id="", + part_ids=["string"], + ) + + +class TestAsyncUploads: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + upload = await async_client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.uploads.with_raw_response.create( + bytes=0, + 
filename="filename", + mime_type="mime_type", + purpose="assistants", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.uploads.with_streaming_response.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + upload = await async_client.uploads.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.uploads.with_raw_response.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = await response.parse() + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.uploads.with_streaming_response.add_part( + upload_id="upload_abc123", + data=b"raw file contents", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(UploadAddPartResponse, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + await async_client.uploads.with_raw_response.add_part( + upload_id="", + data=b"raw file contents", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + upload = await async_client.uploads.cancel( + "upload_abc123", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.uploads.with_raw_response.cancel( + "upload_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.uploads.with_streaming_response.cancel( + "upload_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = 
await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + await async_client.uploads.with_raw_response.cancel( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + upload = await async_client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_complete_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + upload = await async_client.uploads.complete( + upload_id="upload_abc123", + part_ids=["string"], + md5="md5", + ) + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.uploads.with_raw_response.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.uploads.with_streaming_response.complete( + upload_id="upload_abc123", + part_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + upload = await response.parse() + assert_matches_type(Upload, upload, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"): + await async_client.uploads.with_raw_response.complete( + upload_id="", + part_ids=["string"], + ) diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py new file mode 100644 index 00000000..1c8b5fb0 --- /dev/null +++ b/tests/api_resources/test_vector_stores.py @@ -0,0 +1,603 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
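+#
+# Each `test_path_params_*` case below checks that the SDK validates path
+# parameters client-side: an empty identifier raises ValueError before any
+# HTTP request is sent. A minimal sketch of the guarded call, assuming a
+# `client` instance built against the mock server as above:
+#
+#     with pytest.raises(ValueError):
+#         client.vector_stores.with_raw_response.retrieve("")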
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import ( + VectorStoreObject, + VectorStoreListResponse, + VectorStoreDeleteResponse, + VectorStoreSearchResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVectorStores: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.create() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.create( + chunking_strategy={"type": "auto"}, + expires_after={ + "anchor": "last_active_at", + "days": 1, + }, + file_ids=["string"], + metadata={"foo": "string"}, + name="name", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.retrieve( + "vector_store_id", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.retrieve( + "vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.retrieve( + "vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.with_raw_response.retrieve( + "", 
+ ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.update( + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.update( + vector_store_id="vector_store_id", + expires_after={ + "anchor": "last_active_at", + "days": 1, + }, + metadata={"foo": "string"}, + name="name", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.update( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.update( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.with_raw_response.update( + vector_store_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.list() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.list( + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.delete( + "vector_store_id", + ) + assert_matches_type(VectorStoreDeleteResponse, 
vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.delete( + "vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.delete( + "vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_search(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + ) + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_search_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + vector_store = client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + filters={ + "key": "key", + "type": "eq", + "value": "string", + }, + max_num_results=1, + ranking_options={ + "ranker": "auto", + "score_threshold": 0, + }, + rewrite_query=True, + ) + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_search(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.with_raw_response.search( + vector_store_id="vs_abc123", + query="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = response.parse() + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_search(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.with_streaming_response.search( + vector_store_id="vs_abc123", + query="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = response.parse() + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_search(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.with_raw_response.search( + vector_store_id="", + query="string", + ) + + +class TestAsyncVectorStores: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.create() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.create( + chunking_strategy={"type": "auto"}, + expires_after={ + "anchor": "last_active_at", + "days": 1, + }, + file_ids=["string"], + metadata={"foo": "string"}, + name="name", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.retrieve( + "vector_store_id", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.retrieve( + "vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.retrieve( + "vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.update( + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.update( + vector_store_id="vector_store_id", + expires_after={ + "anchor": "last_active_at", + "days": 1, + }, + metadata={"foo": "string"}, + name="name", + ) + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.update( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.update( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreObject, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.with_raw_response.update( + vector_store_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.list() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.list( + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreListResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.delete( + "vector_store_id", + ) + assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.delete( + "vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.delete( + "vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + ) + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_search_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + vector_store = await async_client.vector_stores.search( + vector_store_id="vs_abc123", + query="string", + filters={ + "key": "key", + "type": "eq", + "value": "string", + }, + max_num_results=1, + ranking_options={ + "ranker": "auto", + "score_threshold": 0, + }, + rewrite_query=True, + ) + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.with_raw_response.search( + vector_store_id="vs_abc123", + query="string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + vector_store = await response.parse() + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.with_streaming_response.search( + vector_store_id="vs_abc123", + query="string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + vector_store = await response.parse() + assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.with_raw_response.search( + vector_store_id="", + query="string", + ) diff --git a/tests/api_resources/threads/__init__.py 
b/tests/api_resources/threads/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/threads/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/threads/runs/__init__.py b/tests/api_resources/threads/runs/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/threads/runs/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/threads/runs/test_steps.py b/tests/api_resources/threads/runs/test_steps.py new file mode 100644 index 00000000..e972e952 --- /dev/null +++ b/tests/api_resources/threads/runs/test_steps.py @@ -0,0 +1,307 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.threads.runs import ( + RunStepObject, + StepListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSteps: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + step = client.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + step = client.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + ) + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.steps.with_raw_response.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + step = response.parse() + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.steps.with_streaming_response.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + step = response.parse() + assert_matches_type(RunStepObject, step, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.steps.with_raw_response.retrieve( + step_id="step_id", + thread_id="", + run_id="run_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.steps.with_raw_response.retrieve( + 
step_id="step_id", + thread_id="thread_id", + run_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): + client.threads.runs.steps.with_raw_response.retrieve( + step_id="", + thread_id="thread_id", + run_id="run_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + step = client.threads.runs.steps.list( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + step = client.threads.runs.steps.list( + run_id="run_id", + thread_id="thread_id", + after="after", + before="before", + include=["step_details.tool_calls[*].file_search.results[*].content"], + limit=0, + order="asc", + ) + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.steps.with_raw_response.list( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + step = response.parse() + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.steps.with_streaming_response.list( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + step = response.parse() + assert_matches_type(StepListResponse, step, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.steps.with_raw_response.list( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.steps.with_raw_response.list( + run_id="", + thread_id="thread_id", + ) + + +class TestAsyncSteps: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + step = await async_client.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + step = await async_client.threads.runs.steps.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + ) + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.steps.with_raw_response.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) + + assert response.is_closed is 
True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + step = await response.parse() + assert_matches_type(RunStepObject, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.steps.with_streaming_response.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="run_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + step = await response.parse() + assert_matches_type(RunStepObject, step, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.steps.with_raw_response.retrieve( + step_id="step_id", + thread_id="", + run_id="run_id", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.steps.with_raw_response.retrieve( + step_id="step_id", + thread_id="thread_id", + run_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): + await async_client.threads.runs.steps.with_raw_response.retrieve( + step_id="", + thread_id="thread_id", + run_id="run_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + step = await async_client.threads.runs.steps.list( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + step = await async_client.threads.runs.steps.list( + run_id="run_id", + thread_id="thread_id", + after="after", + before="before", + include=["step_details.tool_calls[*].file_search.results[*].content"], + limit=0, + order="asc", + ) + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.steps.with_raw_response.list( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + step = await response.parse() + assert_matches_type(StepListResponse, step, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.steps.with_streaming_response.list( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + step = await response.parse() + assert_matches_type(StepListResponse, step, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await 
async_client.threads.runs.steps.with_raw_response.list( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.steps.with_raw_response.list( + run_id="", + thread_id="thread_id", + ) diff --git a/tests/api_resources/threads/test_messages.py b/tests/api_resources/threads/test_messages.py new file mode 100644 index 00000000..e1aaf51e --- /dev/null +++ b/tests/api_resources/threads/test_messages.py @@ -0,0 +1,602 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.threads import ( + MessageObject, + MessageListResponse, + MessageDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestMessages: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.create( + thread_id="thread_id", + content="string", + role="user", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.create( + thread_id="thread_id", + content="string", + role="user", + attachments=[ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + metadata={"foo": "string"}, + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.messages.with_raw_response.create( + thread_id="thread_id", + content="string", + role="user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.messages.with_streaming_response.create( + thread_id="thread_id", + content="string", + role="user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.messages.with_raw_response.create( + thread_id="", + content="string", + role="user", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.retrieve( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: 
DigitaloceanGenaiSDK) -> None: + response = client.threads.messages.with_raw_response.retrieve( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.messages.with_streaming_response.retrieve( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.messages.with_raw_response.retrieve( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + client.threads.messages.with_raw_response.retrieve( + message_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.update( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.update( + message_id="message_id", + thread_id="thread_id", + metadata={"foo": "string"}, + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.messages.with_raw_response.update( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.messages.with_streaming_response.update( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.messages.with_raw_response.update( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + client.threads.messages.with_raw_response.update( + message_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> 
None: + message = client.threads.messages.list( + thread_id="thread_id", + ) + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.list( + thread_id="thread_id", + after="after", + before="before", + limit=0, + order="asc", + run_id="run_id", + ) + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.messages.with_raw_response.list( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.messages.with_streaming_response.list( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageListResponse, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.messages.with_raw_response.list( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + message = client.threads.messages.delete( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.messages.with_raw_response.delete( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = response.parse() + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.messages.with_streaming_response.delete( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = response.parse() + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.messages.with_raw_response.delete( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + client.threads.messages.with_raw_response.delete( + message_id="", + thread_id="thread_id", + ) + + +class TestAsyncMessages: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, 
ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.create( + thread_id="thread_id", + content="string", + role="user", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.create( + thread_id="thread_id", + content="string", + role="user", + attachments=[ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + metadata={"foo": "string"}, + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.messages.with_raw_response.create( + thread_id="thread_id", + content="string", + role="user", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.messages.with_streaming_response.create( + thread_id="thread_id", + content="string", + role="user", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.messages.with_raw_response.create( + thread_id="", + content="string", + role="user", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.retrieve( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.messages.with_raw_response.retrieve( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.messages.with_streaming_response.retrieve( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async 
def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.messages.with_raw_response.retrieve( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + await async_client.threads.messages.with_raw_response.retrieve( + message_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.update( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.update( + message_id="message_id", + thread_id="thread_id", + metadata={"foo": "string"}, + ) + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.messages.with_raw_response.update( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.messages.with_streaming_response.update( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageObject, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.messages.with_raw_response.update( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + await async_client.threads.messages.with_raw_response.update( + message_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.list( + thread_id="thread_id", + ) + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.list( + thread_id="thread_id", + after="after", + before="before", + limit=0, + order="asc", + run_id="run_id", + ) + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.threads.messages.with_raw_response.list( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = await response.parse() + assert_matches_type(MessageListResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.messages.with_streaming_response.list( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageListResponse, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.messages.with_raw_response.list( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + message = await async_client.threads.messages.delete( + message_id="message_id", + thread_id="thread_id", + ) + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.messages.with_raw_response.delete( + message_id="message_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + message = await response.parse() + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.messages.with_streaming_response.delete( + message_id="message_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + message = await response.parse() + assert_matches_type(MessageDeleteResponse, message, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.messages.with_raw_response.delete( + message_id="message_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): + await async_client.threads.messages.with_raw_response.delete( + message_id="", + thread_id="thread_id", + ) diff --git a/tests/api_resources/threads/test_runs.py b/tests/api_resources/threads/test_runs.py new file mode 100644 index 00000000..59716b5e --- /dev/null +++ b/tests/api_resources/threads/test_runs.py @@ -0,0 +1,967 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.threads import ( + RunObject, + RunListResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRuns: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.create( + assistant_id="assistant_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.create( + assistant_id="assistant_id", + instructions="instructions", + max_completion_tokens=256, + max_prompt_tokens=256, + metadata={"foo": "string"}, + model="gpt-4o", + parallel_tool_calls=True, + response_format="auto", + stream=True, + temperature=1, + thread={ + "messages": [ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": "string"}, + } + ], + "metadata": {"foo": "string"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + }, + tool_choice="none", + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + truncation_strategy={ + "type": "auto", + "last_messages": 1, + }, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.create( + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.create( + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.retrieve( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.retrieve( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.retrieve( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.retrieve( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.with_raw_response.retrieve( + run_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.update( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.update( + run_id="run_id", + thread_id="thread_id", + metadata={"foo": "string"}, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.update( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.update( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.update( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.with_raw_response.update( + run_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.list( + thread_id="thread_id", + ) + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.list( + thread_id="thread_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.list( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.list( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunListResponse, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.list( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.cancel( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.cancel( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.cancel( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.cancel( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.with_raw_response.cancel( + run_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_create_run(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_run_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + additional_instructions="additional_instructions", + additional_messages=[ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": 
"string"}, + } + ], + instructions="instructions", + max_completion_tokens=256, + max_prompt_tokens=256, + metadata={"foo": "string"}, + model="gpt-4o", + parallel_tool_calls=True, + reasoning_effort="low", + response_format="auto", + stream=True, + temperature=1, + tool_choice="none", + tools=[{"type": "code_interpreter"}], + top_p=1, + truncation_strategy={ + "type": "auto", + "last_messages": 1, + }, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_run(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_run(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create_run(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.create_run( + thread_id="", + assistant_id="assistant_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_submit_tool_outputs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + run = client.threads.runs.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[ + { + "output": "output", + "tool_call_id": "tool_call_id", + } + ], + stream=True, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: + response = client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: + with client.threads.runs.with_streaming_response.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="run_id", + thread_id="", + tool_outputs=[{}], + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="", + thread_id="thread_id", + tool_outputs=[{}], + ) + + +class TestAsyncRuns: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.create( + assistant_id="assistant_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.create( + assistant_id="assistant_id", + instructions="instructions", + max_completion_tokens=256, + max_prompt_tokens=256, + metadata={"foo": "string"}, + model="gpt-4o", + parallel_tool_calls=True, + response_format="auto", + stream=True, + temperature=1, + thread={ + "messages": [ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": "string"}, + } + ], + "metadata": {"foo": "string"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["string"]}, + "file_search": { + "vector_store_ids": ["string"], + "vector_stores": [ + { + "chunking_strategy": {"type": "auto"}, + "file_ids": ["string"], + "metadata": {"foo": "string"}, + } + ], + }, + }, + }, + tool_choice="none", + tool_resources={ + "code_interpreter": {"file_ids": ["string"]}, + "file_search": {"vector_store_ids": ["string"]}, + }, + tools=[{"type": "code_interpreter"}], + top_p=1, + truncation_strategy={ + "type": "auto", + "last_messages": 1, + }, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.create( + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.create( + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.retrieve( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.threads.runs.with_raw_response.retrieve( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.retrieve( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.retrieve( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.with_raw_response.retrieve( + run_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.update( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.update( + run_id="run_id", + thread_id="thread_id", + metadata={"foo": "string"}, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.update( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.update( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.update( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.with_raw_response.update( + run_id="", + thread_id="thread_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.list( + thread_id="thread_id", + ) + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.list( + thread_id="thread_id", + after="after", + before="before", + limit=0, + order="asc", + ) + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.list( + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunListResponse, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.list( + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunListResponse, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.list( + thread_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.cancel( + run_id="run_id", + thread_id="thread_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.cancel( + run_id="run_id", + thread_id="thread_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.cancel( + run_id="run_id", + thread_id="thread_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.cancel( + run_id="run_id", + thread_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.with_raw_response.cancel( + run_id="", + thread_id="thread_id", + 
) + + @pytest.mark.skip() + @parametrize + async def test_method_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_run_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + include=["step_details.tool_calls[*].file_search.results[*].content"], + additional_instructions="additional_instructions", + additional_messages=[ + { + "content": "string", + "role": "user", + "attachments": [ + { + "file_id": "file_id", + "tools": [{"type": "code_interpreter"}], + } + ], + "metadata": {"foo": "string"}, + } + ], + instructions="instructions", + max_completion_tokens=256, + max_prompt_tokens=256, + metadata={"foo": "string"}, + model="gpt-4o", + parallel_tool_calls=True, + reasoning_effort="low", + response_format="auto", + stream=True, + temperature=1, + tool_choice="none", + tools=[{"type": "code_interpreter"}], + top_p=1, + truncation_strategy={ + "type": "auto", + "last_messages": 1, + }, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.create_run( + thread_id="thread_id", + assistant_id="assistant_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.create_run( + thread_id="", + assistant_id="assistant_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_submit_tool_outputs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + run = await async_client.threads.runs.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[ + { + "output": "output", + "tool_call_id": "tool_call_id", + } + ], + stream=True, + ) + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_submit_tool_outputs(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.threads.runs.with_streaming_response.submit_tool_outputs( + run_id="run_id", + thread_id="thread_id", + tool_outputs=[{}], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + run = await response.parse() + assert_matches_type(RunObject, run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): + await async_client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="run_id", + thread_id="", + tool_outputs=[{}], + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): + await async_client.threads.runs.with_raw_response.submit_tool_outputs( + run_id="", + thread_id="thread_id", + tool_outputs=[{}], + ) diff --git a/tests/api_resources/vector_stores/__init__.py b/tests/api_resources/vector_stores/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/vector_stores/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py new file mode 100644 index 00000000..47897412 --- /dev/null +++ b/tests/api_resources/vector_stores/test_file_batches.py @@ -0,0 +1,479 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.vector_stores import ( + VectorStoreFileBatchObject, + ListVectorStoreFilesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFileBatches: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.create( + vector_store_id="vs_abc123", + file_ids=["string"], + attributes={"foo": "string"}, + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.file_batches.with_raw_response.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.file_batches.with_streaming_response.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.create( + vector_store_id="", + file_ids=["string"], + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with 
client.vector_stores.file_batches.with_streaming_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="", + vector_store_id="vs_abc123", + ) + + @pytest.mark.skip() + @parametrize + def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.file_batches.with_streaming_response.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="batch_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="", + vector_store_id="vector_store_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_files(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_files_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + file_batch = client.vector_stores.file_batches.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + after="after", + before="before", + filter="in_progress", + limit=0, + order="asc", + ) + 
assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_files(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_files(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.file_batches.with_streaming_response.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_files(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="batch_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="", + vector_store_id="vector_store_id", + ) + + +class TestAsyncFileBatches: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.create( + vector_store_id="vs_abc123", + file_ids=["string"], + attributes={"foo": "string"}, + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.file_batches.with_raw_response.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = await response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.file_batches.with_streaming_response.create( + vector_store_id="vs_abc123", + file_ids=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = await response.parse() + 
assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.create( + vector_store_id="", + file_ids=["string"], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = await response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.file_batches.with_streaming_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = await response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="vsfb_abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.retrieve( + batch_id="", + vector_store_id="vs_abc123", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = await response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with 
async_client.vector_stores.file_batches.with_streaming_response.cancel( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = await response.parse() + assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="batch_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.cancel( + batch_id="", + vector_store_id="vector_store_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_files_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file_batch = await async_client.vector_stores.file_batches.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + after="after", + before="before", + filter="in_progress", + limit=0, + order="asc", + ) + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file_batch = await response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.file_batches.with_streaming_response.list_files( + batch_id="batch_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file_batch = await response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="batch_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): + await async_client.vector_stores.file_batches.with_raw_response.list_files( + batch_id="", + vector_store_id="vector_store_id", + ) diff --git 
a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py new file mode 100644 index 00000000..b93fe1b4 --- /dev/null +++ b/tests/api_resources/vector_stores/test_files.py @@ -0,0 +1,677 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.vector_stores import ( + FileDeleteResponse, + VectorStoreFileObject, + FileRetrieveContentResponse, + ListVectorStoreFilesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFiles: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file_id", + attributes={"foo": "string"}, + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.create( + vector_store_id="", + file_id="file_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + 
assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve( + file_id="", + vector_store_id="vs_abc123", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="", + attributes={"foo": "string"}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.list( + vector_store_id="vector_store_id", + ) + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.list( + vector_store_id="vector_store_id", + 
after="after", + before="before", + filter="in_progress", + limit=0, + order="asc", + ) + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.list( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.list( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.list( + vector_store_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.delete( + file_id="", + vector_store_id="vector_store_id", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + file = client.vector_stores.files.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + response = client.vector_stores.files.with_raw_response.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = response.parse() + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + with client.vector_stores.files.with_streaming_response.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = response.parse() + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve_content( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + client.vector_stores.files.with_raw_response.retrieve_content( + file_id="", + vector_store_id="vs_abc123", + ) + + +class TestAsyncFiles: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file_id", + attributes={"foo": "string"}, + chunking_strategy={"type": "auto"}, + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.create( + vector_store_id="vs_abc123", + file_id="file_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.create( + vector_store_id="", + file_id="file_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.retrieve( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve( + file_id="", + vector_store_id="vs_abc123", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.update( + file_id="file-abc123", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(VectorStoreFileObject, file, path=["response"]) + + 
assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="file-abc123", + vector_store_id="", + attributes={"foo": "string"}, + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.update( + file_id="", + vector_store_id="vs_abc123", + attributes={"foo": "string"}, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.list( + vector_store_id="vector_store_id", + ) + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.list( + vector_store_id="vector_store_id", + after="after", + before="before", + filter="in_progress", + limit=0, + order="asc", + ) + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.list( + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.list( + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.list( + vector_store_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async 
def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.delete( + file_id="file_id", + vector_store_id="vector_store_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileDeleteResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.delete( + file_id="file_id", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.delete( + file_id="", + vector_store_id="vector_store_id", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + file = await async_client.vector_stores.files.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.vector_stores.files.with_raw_response.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + file = await response.parse() + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.vector_stores.files.with_streaming_response.retrieve_content( + file_id="file-abc123", + vector_store_id="vs_abc123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + file = await response.parse() + assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve_content( + file_id="file-abc123", + vector_store_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): + await async_client.vector_stores.files.with_raw_response.retrieve_content( + file_id="", + vector_store_id="vs_abc123", + ) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..abd9aa51 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import os +import logging +from typing import TYPE_CHECKING, Iterator, AsyncIterator + +import pytest +from pytest_asyncio import is_async_test + +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + +if TYPE_CHECKING: + 
from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] + +pytest.register_assert_rewrite("tests.utils") + +logging.getLogger("digitalocean_genai_sdk").setLevel(logging.DEBUG) + + +# automatically add `pytest.mark.asyncio()` to all of our async tests +# so we don't have to add that boilerplate everywhere +def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: + pytest_asyncio_tests = (item for item in items if is_async_test(item)) + session_scope_marker = pytest.mark.asyncio(loop_scope="session") + for async_test in pytest_asyncio_tests: + async_test.add_marker(session_scope_marker, append=False) + + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + +api_key = "My API Key" + + +@pytest.fixture(scope="session") +def client(request: FixtureRequest) -> Iterator[DigitaloceanGenaiSDK]: + strict = getattr(request, "param", True) + if not isinstance(strict, bool): + raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") + + with DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + yield client + + +@pytest.fixture(scope="session") +async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncDigitaloceanGenaiSDK]: + strict = getattr(request, "param", True) + if not isinstance(strict, bool): + raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") + + async with AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=strict + ) as client: + yield client diff --git a/tests/sample_file.txt b/tests/sample_file.txt new file mode 100644 index 00000000..af5626b4 --- /dev/null +++ b/tests/sample_file.txt @@ -0,0 +1 @@ +Hello, world! diff --git a/tests/test_client.py b/tests/test_client.py new file mode 100644 index 00000000..c13403e3 --- /dev/null +++ b/tests/test_client.py @@ -0,0 +1,1640 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
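
A note on the async plumbing in tests/conftest.py above: the `pytest_collection_modifyitems` hook attaches the pytest-asyncio marker to every collected async test so that they all share one session-scoped event loop. A minimal sketch of the per-test boilerplate the hook removes (hypothetical test name; assumes pytest-asyncio's `loop_scope` marker semantics):

    import pytest

    # Without the conftest hook, every async test would need this marker by hand
    # so that all async tests run on the same session-scoped event loop:
    @pytest.mark.asyncio(loop_scope="session")
    async def test_something_async() -> None:
        ...  # test body unchanged; the hook adds this marker automatically
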
+ +from __future__ import annotations + +import gc +import os +import sys +import json +import time +import asyncio +import inspect +import subprocess +import tracemalloc +from typing import Any, Union, cast +from textwrap import dedent +from unittest import mock +from typing_extensions import Literal + +import httpx +import pytest +from respx import MockRouter +from pydantic import ValidationError + +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError +from digitalocean_genai_sdk._types import Omit +from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions +from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER +from digitalocean_genai_sdk._exceptions import ( + APIStatusError, + APITimeoutError, + DigitaloceanGenaiSDKError, +) +from digitalocean_genai_sdk._base_client import ( + DEFAULT_TIMEOUT, + HTTPX_DEFAULT_TIMEOUT, + BaseClient, + make_request_options, +) + +from .utils import update_env + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") +api_key = "My API Key" + + +def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + return dict(url.params) + + +def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: + return 0.1 + + +def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiSDK) -> int: + transport = client._client._transport + assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) + + pool = transport._pool + return len(pool._requests) + + +class TestDigitaloceanGenaiSDK: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + @pytest.mark.respx(base_url=base_url) + def test_raw_response(self, respx_mock: MockRouter) -> None: + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = self.client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + @pytest.mark.respx(base_url=base_url) + def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') + ) + + response = self.client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + def test_copy(self) -> None: + copied = self.client.copy() + assert id(copied) != id(self.client) + + copied = self.client.copy(api_key="another My API Key") + assert copied.api_key == "another My API Key" + assert self.client.api_key == "My API Key" + + def test_copy_default_options(self) -> None: + # options that have a default are overridden correctly + copied = self.client.copy(max_retries=7) + assert copied.max_retries == 7 + assert self.client.max_retries == 2 + + copied2 = copied.copy(max_retries=6) + assert copied2.max_retries == 6 + assert copied.max_retries == 7 + + # timeout + assert isinstance(self.client.timeout, httpx.Timeout) + copied = self.client.copy(timeout=None) + assert copied.timeout is None + assert isinstance(self.client.timeout, httpx.Timeout) + + def test_copy_default_headers(self) -> None: + client = DigitaloceanGenaiSDK( 
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + ) + assert client.default_headers["X-Foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert copied.default_headers["X-Foo"] == "bar" + + # merges already given headers + copied = client.copy(default_headers={"X-Bar": "stainless"}) + assert copied.default_headers["X-Foo"] == "bar" + assert copied.default_headers["X-Bar"] == "stainless" + + # uses new values for any already given headers + copied = client.copy(default_headers={"X-Foo": "stainless"}) + assert copied.default_headers["X-Foo"] == "stainless" + + # set_default_headers + + # completely overrides already set values + copied = client.copy(set_default_headers={}) + assert copied.default_headers.get("X-Foo") is None + + copied = client.copy(set_default_headers={"X-Bar": "Robert"}) + assert copied.default_headers["X-Bar"] == "Robert" + + with pytest.raises( + ValueError, + match="`default_headers` and `set_default_headers` arguments are mutually exclusive", + ): + client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + + def test_copy_default_query(self) -> None: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} + ) + assert _get_params(client)["foo"] == "bar" + + # does not override the already given value when not specified + copied = client.copy() + assert _get_params(copied)["foo"] == "bar" + + # merges already given params + copied = client.copy(default_query={"bar": "stainless"}) + params = _get_params(copied) + assert params["foo"] == "bar" + assert params["bar"] == "stainless" + + # uses new values for any already given params + copied = client.copy(default_query={"foo": "stainless"}) + assert _get_params(copied)["foo"] == "stainless" + + # set_default_query + + # completely overrides already set values + copied = client.copy(set_default_query={}) + assert _get_params(copied) == {} + + copied = client.copy(set_default_query={"bar": "Robert"}) + assert _get_params(copied)["bar"] == "Robert" + + with pytest.raises( + ValueError, + # TODO: update + match="`default_query` and `set_default_query` arguments are mutually exclusive", + ): + client.copy(set_default_query={}, default_query={"foo": "Bar"}) + + def test_copy_signature(self) -> None: + # ensure the same parameters that can be passed to the client are defined in the `.copy()` method + init_signature = inspect.signature( + # mypy doesn't like that we access the `__init__` property. + self.client.__init__, # type: ignore[misc] + ) + copy_signature = inspect.signature(self.client.copy) + exclude_params = {"transport", "proxies", "_strict_response_validation"} + + for name in init_signature.parameters.keys(): + if name in exclude_params: + continue + + copy_param = copy_signature.parameters.get(name) + assert copy_param is not None, f"copy() signature is missing the {name} param" + + def test_copy_build_request(self) -> None: + options = FinalRequestOptions(method="get", url="/foo") + + def build_request(options: FinalRequestOptions) -> None: + client = self.client.copy() + client._build_request(options) + + # ensure that the machinery is warmed up before tracing starts.
+ build_request(options) + gc.collect() + + tracemalloc.start(1000) + + snapshot_before = tracemalloc.take_snapshot() + + ITERATIONS = 10 + for _ in range(ITERATIONS): + build_request(options) + + gc.collect() + snapshot_after = tracemalloc.take_snapshot() + + tracemalloc.stop() + + def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: + if diff.count == 0: + # Avoid false positives by considering only leaks (i.e. allocations that persist). + return + + if diff.count % ITERATIONS != 0: + # Avoid false positives by considering only leaks that appear per iteration. + return + + for frame in diff.traceback: + if any( + frame.filename.endswith(fragment) + for fragment in [ + # to_raw_response_wrapper leaks through the @functools.wraps() decorator. + # + # removing the decorator fixes the leak for reasons we don't understand. + "digitalocean_genai_sdk/_legacy_response.py", + "digitalocean_genai_sdk/_response.py", + # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. + "digitalocean_genai_sdk/_compat.py", + # Standard library leaks we don't care about. + "/logging/__init__.py", + ] + ): + return + + leaks.append(diff) + + leaks: list[tracemalloc.StatisticDiff] = [] + for diff in snapshot_after.compare_to(snapshot_before, "traceback"): + add_leak(leaks, diff) + if leaks: + for leak in leaks: + print("MEMORY LEAK:", leak) + for frame in leak.traceback: + print(frame) + raise AssertionError() + + def test_request_timeout(self) -> None: + request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + request = self.client._build_request( + FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) + ) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(100.0) + + def test_client_timeout_option(self) -> None: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(0) + + def test_http_client_timeout_option(self) -> None: + # custom timeout given to the httpx client should be used + with httpx.Client(timeout=None) as http_client: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(None) + + # no timeout given to the httpx client should not use the httpx default + with httpx.Client() as http_client: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + # explicitly passing the default timeout currently results in it being ignored + with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client 
+ ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT # our default + + async def test_invalid_http_client(self) -> None: + with pytest.raises(TypeError, match="Invalid `http_client` arg"): + async with httpx.AsyncClient() as http_client: + DigitaloceanGenaiSDK( + base_url=base_url, + api_key=api_key, + _strict_response_validation=True, + http_client=cast(Any, http_client), + ) + + def test_default_headers_option(self) -> None: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "bar" + assert request.headers.get("x-stainless-lang") == "python" + + client2 = DigitaloceanGenaiSDK( + base_url=base_url, + api_key=api_key, + _strict_response_validation=True, + default_headers={ + "X-Foo": "stainless", + "X-Stainless-Lang": "my-overriding-header", + }, + ) + request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "stainless" + assert request.headers.get("x-stainless-lang") == "my-overriding-header" + + def test_validate_headers(self) -> None: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("Authorization") == f"Bearer {api_key}" + + with pytest.raises(DigitaloceanGenaiSDKError): + with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + _ = client2 + + def test_default_query_option(self) -> None: + client = DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + assert dict(url.params) == {"query_param": "bar"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/foo", + params={"foo": "baz", "query_param": "overridden"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} + + def test_request_extra_json(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": False} + + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"baz": False} + + # `extra_json` takes priority over `json_data` when keys clash + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar", "baz": True}, + extra_json={"baz": None}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": None} + + def test_request_extra_headers(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options(extra_headers={"X-Foo": "Foo"}), + ), + ) + assert 
request.headers.get("X-Foo") == "Foo" + + # `extra_headers` takes priority over `default_headers` when keys clash + request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_headers={"X-Bar": "false"}, + ), + ), + ) + assert request.headers.get("X-Bar") == "false" + + def test_request_extra_query(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_query={"my_query_param": "Foo"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"my_query_param": "Foo"} + + # if both `query` and `extra_query` are given, they are merged + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"bar": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"bar": "1", "foo": "2"} + + # `extra_query` takes priority over `query` when keys clash + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"foo": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"foo": "2"} + + def test_multipart_repeating_array(self, client: DigitaloceanGenaiSDK) -> None: + request = client._build_request( + FinalRequestOptions.construct( + method="get", + url="/foo", + headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + json_data={"array": ["foo", "bar"]}, + files=[("foo.txt", b"hello world")], + ) + ) + + assert request.read().split(b"\r\n") == [ + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"foo", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"bar", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', + b"Content-Type: application/octet-stream", + b"", + b"hello world", + b"--6b7ba517decee4a450543ea6ae821c82--", + b"", + ] + + @pytest.mark.respx(base_url=base_url) + def test_basic_union_response(self, respx_mock: MockRouter) -> None: + class Model1(BaseModel): + name: str + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + @pytest.mark.respx(base_url=base_url) + def test_union_response_different_types(self, respx_mock: MockRouter) -> None: + """Union of objects with the same field name using a different type""" + + class Model1(BaseModel): + foo: int + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) + + response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model1) + assert response.foo == 1 + + @pytest.mark.respx(base_url=base_url) + def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + """ + Response that sets Content-Type to 
something other than application/json but returns json data + """ + + class Model(BaseModel): + foo: int + + respx_mock.get("/foo").mock( + return_value=httpx.Response( + 200, + content=json.dumps({"foo": 2}), + headers={"Content-Type": "application/text"}, + ) + ) + + response = self.client.get("/foo", cast_to=Model) + assert isinstance(response, Model) + assert response.foo == 2 + + def test_base_url_setter(self) -> None: + client = DigitaloceanGenaiSDK( + base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True + ) + assert client.base_url == "https://example.com/from_init/" + + client.base_url = "https://example.com/from_setter" # type: ignore[assignment] + + assert client.base_url == "https://example.com/from_setter/" + + def test_base_url_env(self) -> None: + with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): + client = DigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + assert client.base_url == "http://localhost:5000/from/env/" + + @pytest.mark.parametrize( + "client", + [ + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + ), + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + + @pytest.mark.parametrize( + "client", + [ + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + ), + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "http://localhost:5000/custom/path/foo" + + @pytest.mark.parametrize( + "client", + [ + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + ), + DigitaloceanGenaiSDK( + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + _strict_response_validation=True, + http_client=httpx.Client(), + ), + ], + ids=["standard", "custom http client"], + ) + def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None: + request = client._build_request( + FinalRequestOptions( + method="post", + url="https://myapi.com/foo", + json_data={"foo": "bar"}, + ), + ) + assert request.url == "https://myapi.com/foo" + + def test_copied_client_does_not_close_http(self) -> None: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + assert not client.is_closed() + + copied = client.copy() + assert copied is not client + + del copied + + assert not client.is_closed() + + def test_client_context_manager(self) -> None: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + with client as c2: + assert c2 is client + assert not c2.is_closed() + assert not 
client.is_closed() + assert client.is_closed() + + @pytest.mark.respx(base_url=base_url) + def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) + + with pytest.raises(APIResponseValidationError) as exc: + self.client.get("/foo", cast_to=Model) + + assert isinstance(exc.value.__cause__, ValidationError) + + def test_client_max_retries_validation(self) -> None: + with pytest.raises(TypeError, match=r"max_retries cannot be None"): + DigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) + ) + + @pytest.mark.respx(base_url=base_url) + def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + + strict_client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + with pytest.raises(APIResponseValidationError): + strict_client.get("/foo", cast_to=Model) + + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + + response = client.get("/foo", cast_to=Model) + assert isinstance(response, str) # type: ignore[unreachable] + + @pytest.mark.parametrize( + "remaining_retries,retry_after,timeout", + [ + [3, "20", 20], + [3, "0", 0.5], + [3, "-10", 0.5], + [3, "60", 60], + [3, "61", 0.5], + [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], + [3, "99999999999999999999999999999999999", 0.5], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "", 0.5], + [2, "", 0.5 * 2.0], + [1, "", 0.5 * 4.0], + [-1100, "", 8], # test large number potentially overflowing + ], + ) + @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) + def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + headers = httpx.Headers({"retry-after": retry_after}) + options = FinalRequestOptions(method="get", url="/foo", max_retries=3) + calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error")) + + with pytest.raises(APITimeoutError): + self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}) + + assert _get_open_connections(self.client) == 0 + + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + respx_mock.get("/assistants").mock(return_value=httpx.Response(500)) + + with pytest.raises(APIStatusError): + self.client.get("/assistants", cast_to=httpx.Response, 
options={"headers": {RAW_RESPONSE_HEADER: "stream"}}) + + assert _get_open_connections(self.client) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) + def test_retries_taken( + self, + client: DigitaloceanGenaiSDK, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = client.assistants.with_raw_response.list() + + assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_omit_retry_count_header( + self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()}) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + def test_overwrite_retry_count_header( + self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"}) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + + +class TestAsyncDigitaloceanGenaiSDK: + client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_raw_response(self, respx_mock: MockRouter) -> None: + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await self.client.post("/foo", cast_to=httpx.Response) + assert response.status_code == 200 + assert isinstance(response, httpx.Response) + assert response.json() == {"foo": "bar"} + + 
@pytest.mark.respx(base_url=base_url)
+    @pytest.mark.asyncio
+    async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+        respx_mock.post("/foo").mock(
+            return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
+        )
+
+        response = await self.client.post("/foo", cast_to=httpx.Response)
+        assert response.status_code == 200
+        assert isinstance(response, httpx.Response)
+        assert response.json() == {"foo": "bar"}
+
+    def test_copy(self) -> None:
+        copied = self.client.copy()
+        assert id(copied) != id(self.client)
+
+        copied = self.client.copy(api_key="another My API Key")
+        assert copied.api_key == "another My API Key"
+        assert self.client.api_key == "My API Key"
+
+    def test_copy_default_options(self) -> None:
+        # options that have a default are overridden correctly
+        copied = self.client.copy(max_retries=7)
+        assert copied.max_retries == 7
+        assert self.client.max_retries == 2
+
+        copied2 = copied.copy(max_retries=6)
+        assert copied2.max_retries == 6
+        assert copied.max_retries == 7
+
+        # timeout
+        assert isinstance(self.client.timeout, httpx.Timeout)
+        copied = self.client.copy(timeout=None)
+        assert copied.timeout is None
+        assert isinstance(self.client.timeout, httpx.Timeout)
+
+    def test_copy_default_headers(self) -> None:
+        client = AsyncDigitaloceanGenaiSDK(
+            base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+        )
+        assert client.default_headers["X-Foo"] == "bar"
+
+        # does not override the already given value when not specified
+        copied = client.copy()
+        assert copied.default_headers["X-Foo"] == "bar"
+
+        # merges already given headers
+        copied = client.copy(default_headers={"X-Bar": "stainless"})
+        assert copied.default_headers["X-Foo"] == "bar"
+        assert copied.default_headers["X-Bar"] == "stainless"
+
+        # uses new values for any already given headers
+        copied = client.copy(default_headers={"X-Foo": "stainless"})
+        assert copied.default_headers["X-Foo"] == "stainless"
+
+        # set_default_headers
+
+        # completely overrides already set values
+        copied = client.copy(set_default_headers={})
+        assert copied.default_headers.get("X-Foo") is None
+
+        copied = client.copy(set_default_headers={"X-Bar": "Robert"})
+        assert copied.default_headers["X-Bar"] == "Robert"
+
+        with pytest.raises(
+            ValueError,
+            match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
+        ):
+            client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+
+    def test_copy_default_query(self) -> None:
+        client = AsyncDigitaloceanGenaiSDK(
+            base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+        )
+        assert _get_params(client)["foo"] == "bar"
+
+        # does not override the already given value when not specified
+        copied = client.copy()
+        assert _get_params(copied)["foo"] == "bar"
+
+        # merges already given params
+        copied = client.copy(default_query={"bar": "stainless"})
+        params = _get_params(copied)
+        assert params["foo"] == "bar"
+        assert params["bar"] == "stainless"
+
+        # uses new values for any already given params
+        copied = client.copy(default_query={"foo": "stainless"})
+        assert _get_params(copied)["foo"] == "stainless"
+
+        # set_default_query
+
+        # completely overrides already set values
+        copied = client.copy(set_default_query={})
+        assert _get_params(copied) == {}
+
+        copied = client.copy(set_default_query={"bar": "Robert"})
+        assert _get_params(copied)["bar"] == "Robert"
+
+        with pytest.raises(
+            ValueError,
+            match="`default_query` and `set_default_query` arguments are mutually exclusive",
+        ):
+            client.copy(set_default_query={}, default_query={"foo": "Bar"})
+
+    def test_copy_signature(self) -> None:
+        # ensure the same parameters that can be passed to the client are defined in the `.copy()` method
+        init_signature = inspect.signature(
+            # mypy doesn't like that we access the `__init__` property.
+            self.client.__init__,  # type: ignore[misc]
+        )
+        copy_signature = inspect.signature(self.client.copy)
+        exclude_params = {"transport", "proxies", "_strict_response_validation"}
+
+        for name in init_signature.parameters.keys():
+            if name in exclude_params:
+                continue
+
+            copy_param = copy_signature.parameters.get(name)
+            assert copy_param is not None, f"copy() signature is missing the {name} param"
+
+    def test_copy_build_request(self) -> None:
+        options = FinalRequestOptions(method="get", url="/foo")
+
+        def build_request(options: FinalRequestOptions) -> None:
+            client = self.client.copy()
+            client._build_request(options)
+
+        # ensure that the machinery is warmed up before tracing starts.
+        build_request(options)
+        gc.collect()
+
+        tracemalloc.start(1000)
+
+        snapshot_before = tracemalloc.take_snapshot()
+
+        ITERATIONS = 10
+        for _ in range(ITERATIONS):
+            build_request(options)
+
+        gc.collect()
+        snapshot_after = tracemalloc.take_snapshot()
+
+        tracemalloc.stop()
+
+        def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
+            if diff.count == 0:
+                # Avoid false positives by considering only leaks (i.e. allocations that persist).
+                return
+
+            if diff.count % ITERATIONS != 0:
+                # Avoid false positives by considering only leaks that appear per iteration.
+                return
+
+            for frame in diff.traceback:
+                if any(
+                    frame.filename.endswith(fragment)
+                    for fragment in [
+                        # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
+                        #
+                        # removing the decorator fixes the leak for reasons we don't understand.
+                        "digitalocean_genai_sdk/_legacy_response.py",
+                        "digitalocean_genai_sdk/_response.py",
+                        # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
+                        "digitalocean_genai_sdk/_compat.py",
+                        # Standard library leaks we don't care about.
+ "/logging/__init__.py", + ] + ): + return + + leaks.append(diff) + + leaks: list[tracemalloc.StatisticDiff] = [] + for diff in snapshot_after.compare_to(snapshot_before, "traceback"): + add_leak(leaks, diff) + if leaks: + for leak in leaks: + print("MEMORY LEAK:", leak) + for frame in leak.traceback: + print(frame) + raise AssertionError() + + async def test_request_timeout(self) -> None: + request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + request = self.client._build_request( + FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) + ) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(100.0) + + async def test_client_timeout_option(self) -> None: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(0) + + async def test_http_client_timeout_option(self) -> None: + # custom timeout given to the httpx client should be used + async with httpx.AsyncClient(timeout=None) as http_client: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == httpx.Timeout(None) + + # no timeout given to the httpx client should not use the httpx default + async with httpx.AsyncClient() as http_client: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT + + # explicitly passing the default timeout currently results in it being ignored + async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + ) + + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore + assert timeout == DEFAULT_TIMEOUT # our default + + def test_invalid_http_client(self) -> None: + with pytest.raises(TypeError, match="Invalid `http_client` arg"): + with httpx.Client() as http_client: + AsyncDigitaloceanGenaiSDK( + base_url=base_url, + api_key=api_key, + _strict_response_validation=True, + http_client=cast(Any, http_client), + ) + + def test_default_headers_option(self) -> None: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "bar" + assert request.headers.get("x-stainless-lang") == "python" + + client2 = AsyncDigitaloceanGenaiSDK( + base_url=base_url, + api_key=api_key, + _strict_response_validation=True, + default_headers={ + "X-Foo": "stainless", + 
"X-Stainless-Lang": "my-overriding-header", + }, + ) + request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("x-foo") == "stainless" + assert request.headers.get("x-stainless-lang") == "my-overriding-header" + + def test_validate_headers(self) -> None: + client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("Authorization") == f"Bearer {api_key}" + + with pytest.raises(DigitaloceanGenaiSDKError): + with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + _ = client2 + + def test_default_query_option(self) -> None: + client = AsyncDigitaloceanGenaiSDK( + base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} + ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + url = httpx.URL(request.url) + assert dict(url.params) == {"query_param": "bar"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/foo", + params={"foo": "baz", "query_param": "overridden"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} + + def test_request_extra_json(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar"}, + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": False} + + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + extra_json={"baz": False}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"baz": False} + + # `extra_json` takes priority over `json_data` when keys clash + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + json_data={"foo": "bar", "baz": True}, + extra_json={"baz": None}, + ), + ) + data = json.loads(request.content.decode("utf-8")) + assert data == {"foo": "bar", "baz": None} + + def test_request_extra_headers(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options(extra_headers={"X-Foo": "Foo"}), + ), + ) + assert request.headers.get("X-Foo") == "Foo" + + # `extra_headers` takes priority over `default_headers` when keys clash + request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_headers={"X-Bar": "false"}, + ), + ), + ) + assert request.headers.get("X-Bar") == "false" + + def test_request_extra_query(self) -> None: + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + extra_query={"my_query_param": "Foo"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"my_query_param": "Foo"} + + # if both `query` and `extra_query` are given, they are merged + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"bar": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"bar": "1", "foo": "2"} + + # `extra_query` takes priority 
over `query` when keys clash + request = self.client._build_request( + FinalRequestOptions( + method="post", + url="/foo", + **make_request_options( + query={"foo": "1"}, + extra_query={"foo": "2"}, + ), + ), + ) + params = dict(request.url.params) + assert params == {"foo": "2"} + + def test_multipart_repeating_array(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + request = async_client._build_request( + FinalRequestOptions.construct( + method="get", + url="/foo", + headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + json_data={"array": ["foo", "bar"]}, + files=[("foo.txt", b"hello world")], + ) + ) + + assert request.read().split(b"\r\n") == [ + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"foo", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="array[]"', + b"", + b"bar", + b"--6b7ba517decee4a450543ea6ae821c82", + b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', + b"Content-Type: application/octet-stream", + b"", + b"hello world", + b"--6b7ba517decee4a450543ea6ae821c82--", + b"", + ] + + @pytest.mark.respx(base_url=base_url) + async def test_basic_union_response(self, respx_mock: MockRouter) -> None: + class Model1(BaseModel): + name: str + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + @pytest.mark.respx(base_url=base_url) + async def test_union_response_different_types(self, respx_mock: MockRouter) -> None: + """Union of objects with the same field name using a different type""" + + class Model1(BaseModel): + foo: int + + class Model2(BaseModel): + foo: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model2) + assert response.foo == "bar" + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) + + response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + assert isinstance(response, Model1) + assert response.foo == 1 + + @pytest.mark.respx(base_url=base_url) + async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + """ + Response that sets Content-Type to something other than application/json but returns json data + """ + + class Model(BaseModel): + foo: int + + respx_mock.get("/foo").mock( + return_value=httpx.Response( + 200, + content=json.dumps({"foo": 2}), + headers={"Content-Type": "application/text"}, + ) + ) + + response = await self.client.get("/foo", cast_to=Model) + assert isinstance(response, Model) + assert response.foo == 2 + + def test_base_url_setter(self) -> None: + client = AsyncDigitaloceanGenaiSDK( + base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True + ) + assert client.base_url == "https://example.com/from_init/" + + client.base_url = "https://example.com/from_setter" # type: ignore[assignment] + + assert client.base_url == "https://example.com/from_setter/" + + def test_base_url_env(self) -> None: + with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): + client = AsyncDigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + 
assert client.base_url == "http://localhost:5000/from/env/"
+
+    @pytest.mark.parametrize(
+        "client",
+        [
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+            ),
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path/",
+                api_key=api_key,
+                _strict_response_validation=True,
+                http_client=httpx.AsyncClient(),
+            ),
+        ],
+        ids=["standard", "custom http client"],
+    )
+    def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+        request = client._build_request(
+            FinalRequestOptions(
+                method="post",
+                url="/foo",
+                json_data={"foo": "bar"},
+            ),
+        )
+        assert request.url == "http://localhost:5000/custom/path/foo"
+
+    @pytest.mark.parametrize(
+        "client",
+        [
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path", api_key=api_key, _strict_response_validation=True
+            ),
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path",
+                api_key=api_key,
+                _strict_response_validation=True,
+                http_client=httpx.AsyncClient(),
+            ),
+        ],
+        ids=["standard", "custom http client"],
+    )
+    def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+        request = client._build_request(
+            FinalRequestOptions(
+                method="post",
+                url="/foo",
+                json_data={"foo": "bar"},
+            ),
+        )
+        assert request.url == "http://localhost:5000/custom/path/foo"
+
+    @pytest.mark.parametrize(
+        "client",
+        [
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+            ),
+            AsyncDigitaloceanGenaiSDK(
+                base_url="http://localhost:5000/custom/path/",
+                api_key=api_key,
+                _strict_response_validation=True,
+                http_client=httpx.AsyncClient(),
+            ),
+        ],
+        ids=["standard", "custom http client"],
+    )
+    def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None:
+        request = client._build_request(
+            FinalRequestOptions(
+                method="post",
+                url="https://myapi.com/foo",
+                json_data={"foo": "bar"},
+            ),
+        )
+        assert request.url == "https://myapi.com/foo"
+
+    async def test_copied_client_does_not_close_http(self) -> None:
+        client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+        assert not client.is_closed()
+
+        copied = client.copy()
+        assert copied is not client
+
+        del copied
+
+        await asyncio.sleep(0.2)
+        assert not client.is_closed()
+
+    async def test_client_context_manager(self) -> None:
+        client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+        async with client as c2:
+            assert c2 is client
+            assert not c2.is_closed()
+            assert not client.is_closed()
+        assert client.is_closed()
+
+    @pytest.mark.respx(base_url=base_url)
+    @pytest.mark.asyncio
+    async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None:
+        class Model(BaseModel):
+            foo: str
+
+        respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
+
+        with pytest.raises(APIResponseValidationError) as exc:
+            await self.client.get("/foo", cast_to=Model)
+
+        assert isinstance(exc.value.__cause__, ValidationError)
+
+    async def test_client_max_retries_validation(self) -> None:
+        with pytest.raises(TypeError, match=r"max_retries cannot be None"):
+            AsyncDigitaloceanGenaiSDK(
+                base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+            )
+
+    @pytest.mark.respx(base_url=base_url)
+    @pytest.mark.asyncio
+    async def
test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + + strict_client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + with pytest.raises(APIResponseValidationError): + await strict_client.get("/foo", cast_to=Model) + + client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + + response = await client.get("/foo", cast_to=Model) + assert isinstance(response, str) # type: ignore[unreachable] + + @pytest.mark.parametrize( + "remaining_retries,retry_after,timeout", + [ + [3, "20", 20], + [3, "0", 0.5], + [3, "-10", 0.5], + [3, "60", 60], + [3, "61", 0.5], + [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], + [3, "99999999999999999999999999999999999", 0.5], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "", 0.5], + [2, "", 0.5 * 2.0], + [1, "", 0.5 * 4.0], + [-1100, "", 8], # test large number potentially overflowing + ], + ) + @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) + @pytest.mark.asyncio + async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: + client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + + headers = httpx.Headers({"retry-after": retry_after}) + options = FinalRequestOptions(method="get", url="/foo", max_retries=3) + calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error")) + + with pytest.raises(APITimeoutError): + await self.client.get( + "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}} + ) + + assert _get_open_connections(self.client) == 0 + + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + respx_mock.get("/assistants").mock(return_value=httpx.Response(500)) + + with pytest.raises(APIStatusError): + await self.client.get( + "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}} + ) + + assert _get_open_connections(self.client) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + @pytest.mark.parametrize("failure_mode", ["status", "exception"]) + async def test_retries_taken( + self, + async_client: AsyncDigitaloceanGenaiSDK, + failures_before_success: int, + failure_mode: Literal["status", "exception"], + respx_mock: MockRouter, + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 
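+
+        # retry_handler fails the first `failures_before_success` attempts (an HTTP 500, or a
+        # raised exception in "exception" mode) before finally returning a 200.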
+ + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + if failure_mode == "exception": + raise RuntimeError("oops") + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = await client.assistants.with_raw_response.list() + + assert response.retries_taken == failures_before_success + assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_omit_retry_count_header( + self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()}) + + assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + + @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) + @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_overwrite_retry_count_header( + self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + ) -> None: + client = async_client.with_options(max_retries=4) + + nb_retries = 0 + + def retry_handler(_request: httpx.Request) -> httpx.Response: + nonlocal nb_retries + if nb_retries < failures_before_success: + nb_retries += 1 + return httpx.Response(500) + return httpx.Response(200) + + respx_mock.get("/assistants").mock(side_effect=retry_handler) + + response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"}) + + assert response.http_request.headers.get("x-stainless-retry-count") == "42" + + def test_get_platform(self) -> None: + # A previous implementation of asyncify could leave threads unterminated when + # used with nest_asyncio. + # + # Since nest_asyncio.apply() is global and cannot be un-applied, this + # test is run in a separate process to avoid affecting other tests. 
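+        #
+        # The child process applies nest_asyncio, calls get_platform through asyncify, and
+        # prints any surviving threads; a non-zero exit code or a hang fails the test below.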
+ test_code = dedent(""" + import asyncio + import nest_asyncio + import threading + + from digitalocean_genai_sdk._utils import asyncify + from digitalocean_genai_sdk._base_client import get_platform + + async def test_main() -> None: + result = await asyncify(get_platform)() + print(result) + for thread in threading.enumerate(): + print(thread.name) + + nest_asyncio.apply() + asyncio.run(test_main()) + """) + with subprocess.Popen( + [sys.executable, "-c", test_code], + text=True, + ) as process: + timeout = 10 # seconds + + start_time = time.monotonic() + while True: + return_code = process.poll() + if return_code is not None: + if return_code != 0: + raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + + # success + break + + if time.monotonic() - start_time > timeout: + process.kill() + raise AssertionError("calling get_platform using asyncify resulted in a hung process") + + time.sleep(0.1) diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py new file mode 100644 index 00000000..317130ef --- /dev/null +++ b/tests/test_deepcopy.py @@ -0,0 +1,58 @@ +from digitalocean_genai_sdk._utils import deepcopy_minimal + + +def assert_different_identities(obj1: object, obj2: object) -> None: + assert obj1 == obj2 + assert id(obj1) != id(obj2) + + +def test_simple_dict() -> None: + obj1 = {"foo": "bar"} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + + +def test_nested_dict() -> None: + obj1 = {"foo": {"bar": True}} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1["foo"], obj2["foo"]) + + +def test_complex_nested_dict() -> None: + obj1 = {"foo": {"bar": [{"hello": "world"}]}} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1["foo"], obj2["foo"]) + assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"]) + assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0]) + + +def test_simple_list() -> None: + obj1 = ["a", "b", "c"] + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + + +def test_nested_list() -> None: + obj1 = ["a", [1, 2, 3]] + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert_different_identities(obj1[1], obj2[1]) + + +class MyObject: ... 
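+# As the test below exercises, deepcopy_minimal only deep-copies plain dicts and lists; any
+# other value (a custom class instance, a tuple, ...) is passed through by reference. A rough
+# sketch of the expected semantics:
+#
+#     obj = {"nums": [1, 2], "pair": ("a", "b")}
+#     copy = deepcopy_minimal(obj)
+#     assert copy["nums"] is not obj["nums"]  # the list is copied
+#     assert copy["pair"] is obj["pair"]  # the tuple is passed through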
+ + +def test_ignores_other_types() -> None: + # custom classes + my_obj = MyObject() + obj1 = {"foo": my_obj} + obj2 = deepcopy_minimal(obj1) + assert_different_identities(obj1, obj2) + assert obj1["foo"] is my_obj + + # tuples + obj3 = ("a", "b") + obj4 = deepcopy_minimal(obj3) + assert obj3 is obj4 diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py new file mode 100644 index 00000000..aad87e09 --- /dev/null +++ b/tests/test_extract_files.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import Sequence + +import pytest + +from digitalocean_genai_sdk._types import FileTypes +from digitalocean_genai_sdk._utils import extract_files + + +def test_removes_files_from_input() -> None: + query = {"foo": "bar"} + assert extract_files(query, paths=[]) == [] + assert query == {"foo": "bar"} + + query2 = {"foo": b"Bar", "hello": "world"} + assert extract_files(query2, paths=[["foo"]]) == [("foo", b"Bar")] + assert query2 == {"hello": "world"} + + query3 = {"foo": {"foo": {"bar": b"Bar"}}, "hello": "world"} + assert extract_files(query3, paths=[["foo", "foo", "bar"]]) == [("foo[foo][bar]", b"Bar")] + assert query3 == {"foo": {"foo": {}}, "hello": "world"} + + query4 = {"foo": {"bar": b"Bar", "baz": "foo"}, "hello": "world"} + assert extract_files(query4, paths=[["foo", "bar"]]) == [("foo[bar]", b"Bar")] + assert query4 == {"hello": "world", "foo": {"baz": "foo"}} + + +def test_multiple_files() -> None: + query = {"documents": [{"file": b"My first file"}, {"file": b"My second file"}]} + assert extract_files(query, paths=[["documents", "", "file"]]) == [ + ("documents[][file]", b"My first file"), + ("documents[][file]", b"My second file"), + ] + assert query == {"documents": [{}, {}]} + + +@pytest.mark.parametrize( + "query,paths,expected", + [ + [ + {"foo": {"bar": "baz"}}, + [["foo", "", "bar"]], + [], + ], + [ + {"foo": ["bar", "baz"]}, + [["foo", "bar"]], + [], + ], + [ + {"foo": {"bar": "baz"}}, + [["foo", "foo"]], + [], + ], + ], + ids=["dict expecting array", "array expecting dict", "unknown keys"], +) +def test_ignores_incorrect_paths( + query: dict[str, object], + paths: Sequence[Sequence[str]], + expected: list[tuple[str, FileTypes]], +) -> None: + assert extract_files(query, paths=paths) == expected diff --git a/tests/test_files.py b/tests/test_files.py new file mode 100644 index 00000000..f3a07ce0 --- /dev/null +++ b/tests/test_files.py @@ -0,0 +1,51 @@ +from pathlib import Path + +import anyio +import pytest +from dirty_equals import IsDict, IsList, IsBytes, IsTuple + +from digitalocean_genai_sdk._files import to_httpx_files, async_to_httpx_files + +readme_path = Path(__file__).parent.parent.joinpath("README.md") + + +def test_pathlib_includes_file_name() -> None: + result = to_httpx_files({"file": readme_path}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) + + +def test_tuple_input() -> None: + result = to_httpx_files([("file", readme_path)]) + print(result) + assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) + + +@pytest.mark.asyncio +async def test_async_pathlib_includes_file_name() -> None: + result = await async_to_httpx_files({"file": readme_path}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) + + +@pytest.mark.asyncio +async def test_async_supports_anyio_path() -> None: + result = await async_to_httpx_files({"file": anyio.Path(readme_path)}) + print(result) + assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) + + 
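+# Note: to_httpx_files / async_to_httpx_files normalize path-like values into httpx's
+# (filename, contents) tuple form; bare strings are rejected (see test_string_not_allowed).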
+@pytest.mark.asyncio +async def test_async_tuple_input() -> None: + result = await async_to_httpx_files([("file", readme_path)]) + print(result) + assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) + + +def test_string_not_allowed() -> None: + with pytest.raises(TypeError, match="Expected file types input to be a FileContent type or to be a tuple"): + to_httpx_files( + { + "file": "foo", # type: ignore + } + ) diff --git a/tests/test_models.py b/tests/test_models.py new file mode 100644 index 00000000..0be34866 --- /dev/null +++ b/tests/test_models.py @@ -0,0 +1,891 @@ +import json +from typing import Any, Dict, List, Union, Optional, cast +from datetime import datetime, timezone +from typing_extensions import Literal, Annotated, TypeAliasType + +import pytest +import pydantic +from pydantic import Field + +from digitalocean_genai_sdk._utils import PropertyInfo +from digitalocean_genai_sdk._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from digitalocean_genai_sdk._models import BaseModel, construct_type + + +class BasicModel(BaseModel): + foo: str + + +@pytest.mark.parametrize("value", ["hello", 1], ids=["correct type", "mismatched"]) +def test_basic(value: object) -> None: + m = BasicModel.construct(foo=value) + assert m.foo == value + + +def test_directly_nested_model() -> None: + class NestedModel(BaseModel): + nested: BasicModel + + m = NestedModel.construct(nested={"foo": "Foo!"}) + assert m.nested.foo == "Foo!" + + # mismatched types + m = NestedModel.construct(nested="hello!") + assert cast(Any, m.nested) == "hello!" + + +def test_optional_nested_model() -> None: + class NestedModel(BaseModel): + nested: Optional[BasicModel] + + m1 = NestedModel.construct(nested=None) + assert m1.nested is None + + m2 = NestedModel.construct(nested={"foo": "bar"}) + assert m2.nested is not None + assert m2.nested.foo == "bar" + + # mismatched types + m3 = NestedModel.construct(nested={"foo"}) + assert isinstance(cast(Any, m3.nested), set) + assert cast(Any, m3.nested) == {"foo"} + + +def test_list_nested_model() -> None: + class NestedModel(BaseModel): + nested: List[BasicModel] + + m = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) + assert m.nested is not None + assert isinstance(m.nested, list) + assert len(m.nested) == 2 + assert m.nested[0].foo == "bar" + assert m.nested[1].foo == "2" + + # mismatched types + m = NestedModel.construct(nested=True) + assert cast(Any, m.nested) is True + + m = NestedModel.construct(nested=[False]) + assert cast(Any, m.nested) == [False] + + +def test_optional_list_nested_model() -> None: + class NestedModel(BaseModel): + nested: Optional[List[BasicModel]] + + m1 = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) + assert m1.nested is not None + assert isinstance(m1.nested, list) + assert len(m1.nested) == 2 + assert m1.nested[0].foo == "bar" + assert m1.nested[1].foo == "2" + + m2 = NestedModel.construct(nested=None) + assert m2.nested is None + + # mismatched types + m3 = NestedModel.construct(nested={1}) + assert cast(Any, m3.nested) == {1} + + m4 = NestedModel.construct(nested=[False]) + assert cast(Any, m4.nested) == [False] + + +def test_list_optional_items_nested_model() -> None: + class NestedModel(BaseModel): + nested: List[Optional[BasicModel]] + + m = NestedModel.construct(nested=[None, {"foo": "bar"}]) + assert m.nested is not None + assert isinstance(m.nested, list) + assert len(m.nested) == 2 + assert m.nested[0] is None + assert m.nested[1] is not None + assert m.nested[1].foo == 
"bar" + + # mismatched types + m3 = NestedModel.construct(nested="foo") + assert cast(Any, m3.nested) == "foo" + + m4 = NestedModel.construct(nested=[False]) + assert cast(Any, m4.nested) == [False] + + +def test_list_mismatched_type() -> None: + class NestedModel(BaseModel): + nested: List[str] + + m = NestedModel.construct(nested=False) + assert cast(Any, m.nested) is False + + +def test_raw_dictionary() -> None: + class NestedModel(BaseModel): + nested: Dict[str, str] + + m = NestedModel.construct(nested={"hello": "world"}) + assert m.nested == {"hello": "world"} + + # mismatched types + m = NestedModel.construct(nested=False) + assert cast(Any, m.nested) is False + + +def test_nested_dictionary_model() -> None: + class NestedModel(BaseModel): + nested: Dict[str, BasicModel] + + m = NestedModel.construct(nested={"hello": {"foo": "bar"}}) + assert isinstance(m.nested, dict) + assert m.nested["hello"].foo == "bar" + + # mismatched types + m = NestedModel.construct(nested={"hello": False}) + assert cast(Any, m.nested["hello"]) is False + + +def test_unknown_fields() -> None: + m1 = BasicModel.construct(foo="foo", unknown=1) + assert m1.foo == "foo" + assert cast(Any, m1).unknown == 1 + + m2 = BasicModel.construct(foo="foo", unknown={"foo_bar": True}) + assert m2.foo == "foo" + assert cast(Any, m2).unknown == {"foo_bar": True} + + assert model_dump(m2) == {"foo": "foo", "unknown": {"foo_bar": True}} + + +def test_strict_validation_unknown_fields() -> None: + class Model(BaseModel): + foo: str + + model = parse_obj(Model, dict(foo="hello!", user="Robert")) + assert model.foo == "hello!" + assert cast(Any, model).user == "Robert" + + assert model_dump(model) == {"foo": "hello!", "user": "Robert"} + + +def test_aliases() -> None: + class Model(BaseModel): + my_field: int = Field(alias="myField") + + m = Model.construct(myField=1) + assert m.my_field == 1 + + # mismatched types + m = Model.construct(myField={"hello": False}) + assert cast(Any, m.my_field) == {"hello": False} + + +def test_repr() -> None: + model = BasicModel(foo="bar") + assert str(model) == "BasicModel(foo='bar')" + assert repr(model) == "BasicModel(foo='bar')" + + +def test_repr_nested_model() -> None: + class Child(BaseModel): + name: str + age: int + + class Parent(BaseModel): + name: str + child: Child + + model = Parent(name="Robert", child=Child(name="Foo", age=5)) + assert str(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" + assert repr(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" + + +def test_optional_list() -> None: + class Submodel(BaseModel): + name: str + + class Model(BaseModel): + items: Optional[List[Submodel]] + + m = Model.construct(items=None) + assert m.items is None + + m = Model.construct(items=[]) + assert m.items == [] + + m = Model.construct(items=[{"name": "Robert"}]) + assert m.items is not None + assert len(m.items) == 1 + assert m.items[0].name == "Robert" + + +def test_nested_union_of_models() -> None: + class Submodel1(BaseModel): + bar: bool + + class Submodel2(BaseModel): + thing: str + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2] + + m = Model.construct(foo={"thing": "hello"}) + assert isinstance(m.foo, Submodel2) + assert m.foo.thing == "hello" + + +def test_nested_union_of_mixed_types() -> None: + class Submodel1(BaseModel): + bar: bool + + class Model(BaseModel): + foo: Union[Submodel1, Literal[True], Literal["CARD_HOLDER"]] + + m = Model.construct(foo=True) + assert m.foo is True + + m = Model.construct(foo="CARD_HOLDER") + assert 
m.foo == "CARD_HOLDER" + + m = Model.construct(foo={"bar": False}) + assert isinstance(m.foo, Submodel1) + assert m.foo.bar is False + + +def test_nested_union_multiple_variants() -> None: + class Submodel1(BaseModel): + bar: bool + + class Submodel2(BaseModel): + thing: str + + class Submodel3(BaseModel): + foo: int + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2, None, Submodel3] + + m = Model.construct(foo={"thing": "hello"}) + assert isinstance(m.foo, Submodel2) + assert m.foo.thing == "hello" + + m = Model.construct(foo=None) + assert m.foo is None + + m = Model.construct() + assert m.foo is None + + m = Model.construct(foo={"foo": "1"}) + assert isinstance(m.foo, Submodel3) + assert m.foo.foo == 1 + + +def test_nested_union_invalid_data() -> None: + class Submodel1(BaseModel): + level: int + + class Submodel2(BaseModel): + name: str + + class Model(BaseModel): + foo: Union[Submodel1, Submodel2] + + m = Model.construct(foo=True) + assert cast(bool, m.foo) is True + + m = Model.construct(foo={"name": 3}) + if PYDANTIC_V2: + assert isinstance(m.foo, Submodel1) + assert m.foo.name == 3 # type: ignore + else: + assert isinstance(m.foo, Submodel2) + assert m.foo.name == "3" + + +def test_list_of_unions() -> None: + class Submodel1(BaseModel): + level: int + + class Submodel2(BaseModel): + name: str + + class Model(BaseModel): + items: List[Union[Submodel1, Submodel2]] + + m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) + assert len(m.items) == 2 + assert isinstance(m.items[0], Submodel1) + assert m.items[0].level == 1 + assert isinstance(m.items[1], Submodel2) + assert m.items[1].name == "Robert" + + m = Model.construct(items=[{"level": -1}, 156]) + assert len(m.items) == 2 + assert isinstance(m.items[0], Submodel1) + assert m.items[0].level == -1 + assert cast(Any, m.items[1]) == 156 + + +def test_union_of_lists() -> None: + class SubModel1(BaseModel): + level: int + + class SubModel2(BaseModel): + name: str + + class Model(BaseModel): + items: Union[List[SubModel1], List[SubModel2]] + + # with one valid entry + m = Model.construct(items=[{"name": "Robert"}]) + assert len(m.items) == 1 + assert isinstance(m.items[0], SubModel2) + assert m.items[0].name == "Robert" + + # with two entries pointing to different types + m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) + assert len(m.items) == 2 + assert isinstance(m.items[0], SubModel1) + assert m.items[0].level == 1 + assert isinstance(m.items[1], SubModel1) + assert cast(Any, m.items[1]).name == "Robert" + + # with two entries pointing to *completely* different types + m = Model.construct(items=[{"level": -1}, 156]) + assert len(m.items) == 2 + assert isinstance(m.items[0], SubModel1) + assert m.items[0].level == -1 + assert cast(Any, m.items[1]) == 156 + + +def test_dict_of_union() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + foo: str + + class Model(BaseModel): + data: Dict[str, Union[SubModel1, SubModel2]] + + m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) + assert len(list(m.data.keys())) == 2 + assert isinstance(m.data["hello"], SubModel1) + assert m.data["hello"].name == "there" + assert isinstance(m.data["foo"], SubModel2) + assert m.data["foo"].foo == "bar" + + # TODO: test mismatched type + + +def test_double_nested_union() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + bar: str + + class Model(BaseModel): + data: Dict[str, List[Union[SubModel1, SubModel2]]] + + m = 
Model.construct(data={"foo": [{"bar": "baz"}, {"name": "Robert"}]}) + assert len(m.data["foo"]) == 2 + + entry1 = m.data["foo"][0] + assert isinstance(entry1, SubModel2) + assert entry1.bar == "baz" + + entry2 = m.data["foo"][1] + assert isinstance(entry2, SubModel1) + assert entry2.name == "Robert" + + # TODO: test mismatched type + + +def test_union_of_dict() -> None: + class SubModel1(BaseModel): + name: str + + class SubModel2(BaseModel): + foo: str + + class Model(BaseModel): + data: Union[Dict[str, SubModel1], Dict[str, SubModel2]] + + m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) + assert len(list(m.data.keys())) == 2 + assert isinstance(m.data["hello"], SubModel1) + assert m.data["hello"].name == "there" + assert isinstance(m.data["foo"], SubModel1) + assert cast(Any, m.data["foo"]).foo == "bar" + + +def test_iso8601_datetime() -> None: + class Model(BaseModel): + created_at: datetime + + expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) + + if PYDANTIC_V2: + expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' + else: + expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' + + model = Model.construct(created_at="2019-12-27T18:11:19.117Z") + assert model.created_at == expected + assert model_json(model) == expected_json + + model = parse_obj(Model, dict(created_at="2019-12-27T18:11:19.117Z")) + assert model.created_at == expected + assert model_json(model) == expected_json + + +def test_does_not_coerce_int() -> None: + class Model(BaseModel): + bar: int + + assert Model.construct(bar=1).bar == 1 + assert Model.construct(bar=10.9).bar == 10.9 + assert Model.construct(bar="19").bar == "19" # type: ignore[comparison-overlap] + assert Model.construct(bar=False).bar is False + + +def test_int_to_float_safe_conversion() -> None: + class Model(BaseModel): + float_field: float + + m = Model.construct(float_field=10) + assert m.float_field == 10.0 + assert isinstance(m.float_field, float) + + m = Model.construct(float_field=10.12) + assert m.float_field == 10.12 + assert isinstance(m.float_field, float) + + # number too big + m = Model.construct(float_field=2**53 + 1) + assert m.float_field == 2**53 + 1 + assert isinstance(m.float_field, int) + + +def test_deprecated_alias() -> None: + class Model(BaseModel): + resource_id: str = Field(alias="model_id") + + @property + def model_id(self) -> str: + return self.resource_id + + m = Model.construct(model_id="id") + assert m.model_id == "id" + assert m.resource_id == "id" + assert m.resource_id is m.model_id + + m = parse_obj(Model, {"model_id": "id"}) + assert m.model_id == "id" + assert m.resource_id == "id" + assert m.resource_id is m.model_id + + +def test_omitted_fields() -> None: + class Model(BaseModel): + resource_id: Optional[str] = None + + m = Model.construct() + assert m.resource_id is None + assert "resource_id" not in m.model_fields_set + + m = Model.construct(resource_id=None) + assert m.resource_id is None + assert "resource_id" in m.model_fields_set + + m = Model.construct(resource_id="foo") + assert m.resource_id == "foo" + assert "resource_id" in m.model_fields_set + + +def test_to_dict() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert m.to_dict() == {"FOO": "hello"} + assert m.to_dict(use_api_names=False) == {"foo": "hello"} + + m2 = Model() + assert m2.to_dict() == {} + assert m2.to_dict(exclude_unset=False) == {"FOO": None} + assert m2.to_dict(exclude_unset=False, 
exclude_none=True) == {} + assert m2.to_dict(exclude_unset=False, exclude_defaults=True) == {} + + m3 = Model(FOO=None) + assert m3.to_dict() == {"FOO": None} + assert m3.to_dict(exclude_none=True) == {} + assert m3.to_dict(exclude_defaults=True) == {} + + class Model2(BaseModel): + created_at: datetime + + time_str = "2024-03-21T11:39:01.275859" + m4 = Model2.construct(created_at=time_str) + assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} + assert m4.to_dict(mode="json") == {"created_at": time_str} + + if not PYDANTIC_V2: + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.to_dict(warnings=False) + + +def test_forwards_compat_model_dump_method() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert m.model_dump() == {"foo": "hello"} + assert m.model_dump(include={"bar"}) == {} + assert m.model_dump(exclude={"foo"}) == {} + assert m.model_dump(by_alias=True) == {"FOO": "hello"} + + m2 = Model() + assert m2.model_dump() == {"foo": None} + assert m2.model_dump(exclude_unset=True) == {} + assert m2.model_dump(exclude_none=True) == {} + assert m2.model_dump(exclude_defaults=True) == {} + + m3 = Model(FOO=None) + assert m3.model_dump() == {"foo": None} + assert m3.model_dump(exclude_none=True) == {} + + if not PYDANTIC_V2: + with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): + m.model_dump(round_trip=True) + + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.model_dump(warnings=False) + + +def test_compat_method_no_error_for_warnings() -> None: + class Model(BaseModel): + foo: Optional[str] + + m = Model(foo="hello") + assert isinstance(model_dump(m, warnings=False), dict) + + +def test_to_json() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert json.loads(m.to_json()) == {"FOO": "hello"} + assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"} + + if PYDANTIC_V2: + assert m.to_json(indent=None) == '{"FOO":"hello"}' + else: + assert m.to_json(indent=None) == '{"FOO": "hello"}' + + m2 = Model() + assert json.loads(m2.to_json()) == {} + assert json.loads(m2.to_json(exclude_unset=False)) == {"FOO": None} + assert json.loads(m2.to_json(exclude_unset=False, exclude_none=True)) == {} + assert json.loads(m2.to_json(exclude_unset=False, exclude_defaults=True)) == {} + + m3 = Model(FOO=None) + assert json.loads(m3.to_json()) == {"FOO": None} + assert json.loads(m3.to_json(exclude_none=True)) == {} + + if not PYDANTIC_V2: + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.to_json(warnings=False) + + +def test_forwards_compat_model_dump_json_method() -> None: + class Model(BaseModel): + foo: Optional[str] = Field(alias="FOO", default=None) + + m = Model(FOO="hello") + assert json.loads(m.model_dump_json()) == {"foo": "hello"} + assert json.loads(m.model_dump_json(include={"bar"})) == {} + assert json.loads(m.model_dump_json(include={"foo"})) == {"foo": "hello"} + assert json.loads(m.model_dump_json(by_alias=True)) == {"FOO": "hello"} + + assert m.model_dump_json(indent=2) == '{\n "foo": "hello"\n}' + + m2 = Model() + assert json.loads(m2.model_dump_json()) == {"foo": None} + assert json.loads(m2.model_dump_json(exclude_unset=True)) == {} + assert json.loads(m2.model_dump_json(exclude_none=True)) == {} + assert json.loads(m2.model_dump_json(exclude_defaults=True)) 
== {} + + m3 = Model(FOO=None) + assert json.loads(m3.model_dump_json()) == {"foo": None} + assert json.loads(m3.model_dump_json(exclude_none=True)) == {} + + if not PYDANTIC_V2: + with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): + m.model_dump_json(round_trip=True) + + with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): + m.model_dump_json(warnings=False) + + +def test_type_compat() -> None: + # our model type can be assigned to Pydantic's model type + + def takes_pydantic(model: pydantic.BaseModel) -> None: # noqa: ARG001 + ... + + class OurModel(BaseModel): + foo: Optional[str] = None + + takes_pydantic(OurModel()) + + +def test_annotated_types() -> None: + class Model(BaseModel): + value: str + + m = construct_type( + value={"value": "foo"}, + type_=cast(Any, Annotated[Model, "random metadata"]), + ) + assert isinstance(m, Model) + assert m.value == "foo" + + +def test_discriminated_unions_invalid_data() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "a", "data": 100}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, A) + assert m.type == "a" + if PYDANTIC_V2: + assert m.data == 100 # type: ignore[comparison-overlap] + else: + # pydantic v1 automatically converts inputs to strings + # if the expected type is a str + assert m.data == "100" + + +def test_discriminated_unions_unknown_variant() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + m = construct_type( + value={"type": "c", "data": None, "new_thing": "bar"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + + # just chooses the first variant + assert isinstance(m, A) + assert m.type == "c" # type: ignore[comparison-overlap] + assert m.data == None # type: ignore[unreachable] + assert m.new_thing == "bar" + + +def test_discriminated_unions_invalid_data_nested_unions() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + class C(BaseModel): + type: Literal["c"] + + data: bool + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "c", "data": "foo"}, + type_=cast(Any, Annotated[Union[Union[A, B], C], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, C) + assert m.type == "c" + assert m.data == "foo" # type: ignore[comparison-overlap] + + +def test_discriminated_unions_with_aliases_invalid_data() -> None: + class A(BaseModel): + foo_type: Literal["a"] = Field(alias="type") + + data: str + + class B(BaseModel): + foo_type: Literal["b"] = Field(alias="type") + + data: int + + m = construct_type( + value={"type": "b", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), + ) + assert isinstance(m, B) + assert m.foo_type == "b" + assert m.data == "foo" # 
type: ignore[comparison-overlap] + + m = construct_type( + value={"type": "a", "data": 100}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="foo_type")]), + ) + assert isinstance(m, A) + assert m.foo_type == "a" + if PYDANTIC_V2: + assert m.data == 100 # type: ignore[comparison-overlap] + else: + # pydantic v1 automatically converts inputs to strings + # if the expected type is a str + assert m.data == "100" + + +def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + type: Literal["a"] + + data: int + + m = construct_type( + value={"type": "a", "data": "foo"}, + type_=cast(Any, Annotated[Union[A, B], PropertyInfo(discriminator="type")]), + ) + assert isinstance(m, B) + assert m.type == "a" + assert m.data == "foo" # type: ignore[comparison-overlap] + + +def test_discriminated_unions_invalid_data_uses_cache() -> None: + class A(BaseModel): + type: Literal["a"] + + data: str + + class B(BaseModel): + type: Literal["b"] + + data: int + + UnionType = cast(Any, Union[A, B]) + + assert not hasattr(UnionType, "__discriminator__") + + m = construct_type( + value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + discriminator = UnionType.__discriminator__ + assert discriminator is not None + + m = construct_type( + value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) + ) + assert isinstance(m, B) + assert m.type == "b" + assert m.data == "foo" # type: ignore[comparison-overlap] + + # if the discriminator details object stays the same between invocations then + # we hit the cache + assert UnionType.__discriminator__ is discriminator + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +def test_type_alias_type() -> None: + Alias = TypeAliasType("Alias", str) # pyright: ignore + + class Model(BaseModel): + alias: Alias + union: Union[int, Alias] + + m = construct_type(value={"alias": "foo", "union": "bar"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.alias, str) + assert m.alias == "foo" + assert isinstance(m.union, str) + assert m.union == "bar" + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +def test_field_named_cls() -> None: + class Model(BaseModel): + cls: str + + m = construct_type(value={"cls": "foo"}, type_=Model) + assert isinstance(m, Model) + assert isinstance(m.cls, str) + + +def test_discriminated_union_case() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + type: Literal["b"] + + data: List[Union[A, object]] + + class ModelA(BaseModel): + type: Literal["modelA"] + + data: int + + class ModelB(BaseModel): + type: Literal["modelB"] + + required: str + + data: Union[A, B] + + # when constructing ModelA | ModelB, value data doesn't match ModelB exactly - missing `required` + m = construct_type( + value={"type": "modelB", "data": {"type": "a", "data": True}}, + type_=cast(Any, Annotated[Union[ModelA, ModelB], PropertyInfo(discriminator="type")]), + ) + + assert isinstance(m, ModelB) diff --git a/tests/test_qs.py b/tests/test_qs.py new file mode 100644 index 00000000..41824698 --- /dev/null +++ b/tests/test_qs.py @@ -0,0 +1,78 @@ +from typing import Any, cast +from functools import 
partial +from urllib.parse import unquote + +import pytest + +from digitalocean_genai_sdk._qs import Querystring, stringify + + +def test_empty() -> None: + assert stringify({}) == "" + assert stringify({"a": {}}) == "" + assert stringify({"a": {"b": {"c": {}}}}) == "" + + +def test_basic() -> None: + assert stringify({"a": 1}) == "a=1" + assert stringify({"a": "b"}) == "a=b" + assert stringify({"a": True}) == "a=true" + assert stringify({"a": False}) == "a=false" + assert stringify({"a": 1.23456}) == "a=1.23456" + assert stringify({"a": None}) == "" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_nested_dotted(method: str) -> None: + if method == "class": + serialise = Querystring(nested_format="dots").stringify + else: + serialise = partial(stringify, nested_format="dots") + + assert unquote(serialise({"a": {"b": "c"}})) == "a.b=c" + assert unquote(serialise({"a": {"b": "c", "d": "e", "f": "g"}})) == "a.b=c&a.d=e&a.f=g" + assert unquote(serialise({"a": {"b": {"c": {"d": "e"}}}})) == "a.b.c.d=e" + assert unquote(serialise({"a": {"b": True}})) == "a.b=true" + + +def test_nested_brackets() -> None: + assert unquote(stringify({"a": {"b": "c"}})) == "a[b]=c" + assert unquote(stringify({"a": {"b": "c", "d": "e", "f": "g"}})) == "a[b]=c&a[d]=e&a[f]=g" + assert unquote(stringify({"a": {"b": {"c": {"d": "e"}}}})) == "a[b][c][d]=e" + assert unquote(stringify({"a": {"b": True}})) == "a[b]=true" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_array_comma(method: str) -> None: + if method == "class": + serialise = Querystring(array_format="comma").stringify + else: + serialise = partial(stringify, array_format="comma") + + assert unquote(serialise({"in": ["foo", "bar"]})) == "in=foo,bar" + assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b]=true,false" + assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b]=true,false,true" + + +def test_array_repeat() -> None: + assert unquote(stringify({"in": ["foo", "bar"]})) == "in=foo&in=bar" + assert unquote(stringify({"a": {"b": [True, False]}})) == "a[b]=true&a[b]=false" + assert unquote(stringify({"a": {"b": [True, False, None, True]}})) == "a[b]=true&a[b]=false&a[b]=true" + assert unquote(stringify({"in": ["foo", {"b": {"c": ["d", "e"]}}]})) == "in=foo&in[b][c]=d&in[b][c]=e" + + +@pytest.mark.parametrize("method", ["class", "function"]) +def test_array_brackets(method: str) -> None: + if method == "class": + serialise = Querystring(array_format="brackets").stringify + else: + serialise = partial(stringify, array_format="brackets") + + assert unquote(serialise({"in": ["foo", "bar"]})) == "in[]=foo&in[]=bar" + assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b][]=true&a[b][]=false" + assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b][]=true&a[b][]=false&a[b][]=true" + + +def test_unknown_array_format() -> None: + with pytest.raises(NotImplementedError, match="Unknown array_format value: foo, choose from comma, repeat"): + stringify({"a": ["foo", "bar"]}, array_format=cast(Any, "foo")) diff --git a/tests/test_required_args.py b/tests/test_required_args.py new file mode 100644 index 00000000..379ac794 --- /dev/null +++ b/tests/test_required_args.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import pytest + +from digitalocean_genai_sdk._utils import required_args + + +def test_too_many_positional_params() -> None: + @required_args(["a"]) + def foo(a: str | None = None) -> str | None: + return a + + with pytest.raises(TypeError, 
match=r"foo\(\) takes 1 argument\(s\) but 2 were given"): + foo("a", "b") # type: ignore + + +def test_positional_param() -> None: + @required_args(["a"]) + def foo(a: str | None = None) -> str | None: + return a + + assert foo("a") == "a" + assert foo(None) is None + assert foo(a="b") == "b" + + with pytest.raises(TypeError, match="Missing required argument: 'a'"): + foo() + + +def test_keyword_only_param() -> None: + @required_args(["a"]) + def foo(*, a: str | None = None) -> str | None: + return a + + assert foo(a="a") == "a" + assert foo(a=None) is None + assert foo(a="b") == "b" + + with pytest.raises(TypeError, match="Missing required argument: 'a'"): + foo() + + +def test_multiple_params() -> None: + @required_args(["a", "b", "c"]) + def foo(a: str = "", *, b: str = "", c: str = "") -> str | None: + return f"{a} {b} {c}" + + assert foo(a="a", b="b", c="c") == "a b c" + + error_message = r"Missing required arguments.*" + + with pytest.raises(TypeError, match=error_message): + foo() + + with pytest.raises(TypeError, match=error_message): + foo(a="a") + + with pytest.raises(TypeError, match=error_message): + foo(b="b") + + with pytest.raises(TypeError, match=error_message): + foo(c="c") + + with pytest.raises(TypeError, match=r"Missing required argument: 'a'"): + foo(b="a", c="c") + + with pytest.raises(TypeError, match=r"Missing required argument: 'b'"): + foo("a", c="c") + + +def test_multiple_variants() -> None: + @required_args(["a"], ["b"]) + def foo(*, a: str | None = None, b: str | None = None) -> str | None: + return a if a is not None else b + + assert foo(a="foo") == "foo" + assert foo(b="bar") == "bar" + assert foo(a=None) is None + assert foo(b=None) is None + + # TODO: this error message could probably be improved + with pytest.raises( + TypeError, + match=r"Missing required arguments; Expected either \('a'\) or \('b'\) arguments to be given", + ): + foo() + + +def test_multiple_params_multiple_variants() -> None: + @required_args(["a", "b"], ["c"]) + def foo(*, a: str | None = None, b: str | None = None, c: str | None = None) -> str | None: + if a is not None: + return a + if b is not None: + return b + return c + + error_message = r"Missing required arguments; Expected either \('a' and 'b'\) or \('c'\) arguments to be given" + + with pytest.raises(TypeError, match=error_message): + foo(a="foo") + + with pytest.raises(TypeError, match=error_message): + foo(b="bar") + + with pytest.raises(TypeError, match=error_message): + foo() + + assert foo(a=None, b="bar") == "bar" + assert foo(c=None) is None + assert foo(c="foo") == "foo" diff --git a/tests/test_response.py b/tests/test_response.py new file mode 100644 index 00000000..768537aa --- /dev/null +++ b/tests/test_response.py @@ -0,0 +1,279 @@ +import json +from typing import Any, List, Union, cast +from typing_extensions import Annotated + +import httpx +import pytest +import pydantic + +from digitalocean_genai_sdk import BaseModel, DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk._response import ( + APIResponse, + BaseAPIResponse, + AsyncAPIResponse, + BinaryAPIResponse, + AsyncBinaryAPIResponse, + extract_response_type, +) +from digitalocean_genai_sdk._streaming import Stream +from digitalocean_genai_sdk._base_client import FinalRequestOptions + + +class ConcreteBaseAPIResponse(APIResponse[bytes]): ... + + +class ConcreteAPIResponse(APIResponse[List[str]]): ... + + +class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ... 
+ + +def test_extract_response_type_direct_classes() -> None: + assert extract_response_type(BaseAPIResponse[str]) == str + assert extract_response_type(APIResponse[str]) == str + assert extract_response_type(AsyncAPIResponse[str]) == str + + +def test_extract_response_type_direct_class_missing_type_arg() -> None: + with pytest.raises( + RuntimeError, + match="Expected type to have a type argument at index 0 but it did not", + ): + extract_response_type(AsyncAPIResponse) + + +def test_extract_response_type_concrete_subclasses() -> None: + assert extract_response_type(ConcreteBaseAPIResponse) == bytes + assert extract_response_type(ConcreteAPIResponse) == List[str] + assert extract_response_type(ConcreteAsyncAPIResponse) == httpx.Response + + +def test_extract_response_type_binary_response() -> None: + assert extract_response_type(BinaryAPIResponse) == bytes + assert extract_response_type(AsyncBinaryAPIResponse) == bytes + + +class PydanticModel(pydantic.BaseModel): ... + + +def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo"), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + with pytest.raises( + TypeError, + match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + ): + response.parse(to=PydanticModel) + + +@pytest.mark.asyncio +async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo"), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + with pytest.raises( + TypeError, + match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + ): + await response.parse(to=PydanticModel) + + +def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo"), + client=client, + stream=True, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + stream = response.parse(to=Stream[int]) + assert stream._cast_to == int + + +@pytest.mark.asyncio +async def test_async_response_parse_custom_stream(async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo"), + client=async_client, + stream=True, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + stream = await response.parse(to=Stream[int]) + assert stream._cast_to == int + + +class CustomModel(BaseModel): + foo: str + bar: int + + +def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None: + response = APIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=CustomModel) + assert obj.foo == "hello!" 
+ assert obj.bar == 2 + + +@pytest.mark.asyncio +async def test_async_response_parse_custom_model(async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse(to=CustomModel) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None: + response = APIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse( + to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]), + ) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +async def test_async_response_parse_annotated_type(async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse( + to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]), + ) + assert obj.foo == "hello!" + assert obj.bar == 2 + + +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expected: bool) -> None: + response = APIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = response.parse(to=bool) + assert result is expected + + +@pytest.mark.parametrize( + "content, expected", + [ + ("false", False), + ("true", True), + ("False", False), + ("True", True), + ("TrUe", True), + ("FalSe", False), + ], +) +async def test_async_response_parse_bool(client: AsyncDigitaloceanGenaiSDK, content: str, expected: bool) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=content), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + result = await response.parse(to=bool) + assert result is expected + + +class OtherModel(BaseModel): + a: str + + +@pytest.mark.parametrize("client", [False], indirect=True) # loose validation +def test_response_parse_expect_model_union_non_json_content(client: DigitaloceanGenaiSDK) -> None: + response = APIResponse( + raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), + client=client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation +async def test_async_response_parse_expect_model_union_non_json_content( + async_client: AsyncDigitaloceanGenaiSDK, +) -> None: + response = AsyncAPIResponse( + raw=httpx.Response(200, content=b"foo", 
headers={"Content-Type": "application/text"}), + client=async_client, + stream=False, + stream_cls=None, + cast_to=str, + options=FinalRequestOptions.construct(method="get", url="/foo"), + ) + + obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel])) + assert isinstance(obj, str) + assert obj == "foo" diff --git a/tests/test_streaming.py b/tests/test_streaming.py new file mode 100644 index 00000000..e707c674 --- /dev/null +++ b/tests/test_streaming.py @@ -0,0 +1,262 @@ +from __future__ import annotations + +from typing import Iterator, AsyncIterator + +import httpx +import pytest + +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk._streaming import Stream, AsyncStream, ServerSentEvent + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_basic(sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK) -> None: + def body() -> Iterator[bytes]: + yield b"event: completion\n" + yield b'data: {"foo":true}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_data_missing_event( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"foo":true}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_event_missing_data( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.data == "" + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_events( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"\n" + yield b"event: completion\n" + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.data == "" + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.data == "" + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_events_with_data( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b'data: {"foo":true}\n' + yield b"\n" + yield b"event: completion\n" + yield b'data: {"bar":false}\n' + yield b"\n" + + iterator = 
make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + + sse = await iter_next(iterator) + assert sse.event == "completion" + assert sse.json() == {"bar": False} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_data_lines_with_empty_line( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"data: {\n" + yield b'data: "foo":\n' + yield b"data: \n" + yield b"data:\n" + yield b"data: true}\n" + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + assert sse.data == '{\n"foo":\n\n\ntrue}' + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_data_json_escaped_double_new_line( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b'data: {"foo": "my long\\n\\ncontent"}' + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": "my long\n\ncontent"} + + await assert_empty_iter(iterator) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multiple_data_lines( + sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK +) -> None: + def body() -> Iterator[bytes]: + yield b"event: ping\n" + yield b"data: {\n" + yield b'data: "foo":\n' + yield b"data: true}\n" + yield b"\n\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event == "ping" + assert sse.json() == {"foo": True} + + await assert_empty_iter(iterator) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_special_new_line_character( + sync: bool, + client: DigitaloceanGenaiSDK, + async_client: AsyncDigitaloceanGenaiSDK, +) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"content":" culpa"}\n' + yield b"\n" + yield b'data: {"content":" \xe2\x80\xa8"}\n' + yield b"\n" + yield b'data: {"content":"foo"}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": " culpa"} + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": " 
"} + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": "foo"} + + await assert_empty_iter(iterator) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +async def test_multi_byte_character_multiple_chunks( + sync: bool, + client: DigitaloceanGenaiSDK, + async_client: AsyncDigitaloceanGenaiSDK, +) -> None: + def body() -> Iterator[bytes]: + yield b'data: {"content":"' + # bytes taken from the string 'известни' and arbitrarily split + # so that some multi-byte characters span multiple chunks + yield b"\xd0" + yield b"\xb8\xd0\xb7\xd0" + yield b"\xb2\xd0\xb5\xd1\x81\xd1\x82\xd0\xbd\xd0\xb8" + yield b'"}\n' + yield b"\n" + + iterator = make_event_iterator(content=body(), sync=sync, client=client, async_client=async_client) + + sse = await iter_next(iterator) + assert sse.event is None + assert sse.json() == {"content": "известни"} + + +async def to_aiter(iter: Iterator[bytes]) -> AsyncIterator[bytes]: + for chunk in iter: + yield chunk + + +async def iter_next(iter: Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]) -> ServerSentEvent: + if isinstance(iter, AsyncIterator): + return await iter.__anext__() + + return next(iter) + + +async def assert_empty_iter(iter: Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]) -> None: + with pytest.raises((StopAsyncIteration, RuntimeError)): + await iter_next(iter) + + +def make_event_iterator( + content: Iterator[bytes], + *, + sync: bool, + client: DigitaloceanGenaiSDK, + async_client: AsyncDigitaloceanGenaiSDK, +) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]: + if sync: + return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events() + + return AsyncStream( + cast_to=object, client=async_client, response=httpx.Response(200, content=to_aiter(content)) + )._iter_events() diff --git a/tests/test_transform.py b/tests/test_transform.py new file mode 100644 index 00000000..3c29084e --- /dev/null +++ b/tests/test_transform.py @@ -0,0 +1,453 @@ +from __future__ import annotations + +import io +import pathlib +from typing import Any, Dict, List, Union, TypeVar, Iterable, Optional, cast +from datetime import date, datetime +from typing_extensions import Required, Annotated, TypedDict + +import pytest + +from digitalocean_genai_sdk._types import NOT_GIVEN, Base64FileInput +from digitalocean_genai_sdk._utils import ( + PropertyInfo, + transform as _transform, + parse_datetime, + async_transform as _async_transform, +) +from digitalocean_genai_sdk._compat import PYDANTIC_V2 +from digitalocean_genai_sdk._models import BaseModel + +_T = TypeVar("_T") + +SAMPLE_FILE_PATH = pathlib.Path(__file__).parent.joinpath("sample_file.txt") + + +async def transform( + data: _T, + expected_type: object, + use_async: bool, +) -> _T: + if use_async: + return await _async_transform(data, expected_type=expected_type) + + return _transform(data, expected_type=expected_type) + + +parametrize = pytest.mark.parametrize("use_async", [False, True], ids=["sync", "async"]) + + +class Foo1(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +@parametrize +@pytest.mark.asyncio +async def test_top_level_alias(use_async: bool) -> None: + assert await transform({"foo_bar": "hello"}, expected_type=Foo1, use_async=use_async) == {"fooBar": "hello"} + + +class Foo2(TypedDict): + bar: Bar2 + + +class Bar2(TypedDict): + this_thing: Annotated[int, PropertyInfo(alias="this__thing")] + baz: Annotated[Baz2, PropertyInfo(alias="Baz")] + + 
+class Baz2(TypedDict): + my_baz: Annotated[str, PropertyInfo(alias="myBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_recursive_typeddict(use_async: bool) -> None: + assert await transform({"bar": {"this_thing": 1}}, Foo2, use_async) == {"bar": {"this__thing": 1}} + assert await transform({"bar": {"baz": {"my_baz": "foo"}}}, Foo2, use_async) == {"bar": {"Baz": {"myBaz": "foo"}}} + + +class Foo3(TypedDict): + things: List[Bar3] + + +class Bar3(TypedDict): + my_field: Annotated[str, PropertyInfo(alias="myField")] + + +@parametrize +@pytest.mark.asyncio +async def test_list_of_typeddict(use_async: bool) -> None: + result = await transform({"things": [{"my_field": "foo"}, {"my_field": "foo2"}]}, Foo3, use_async) + assert result == {"things": [{"myField": "foo"}, {"myField": "foo2"}]} + + +class Foo4(TypedDict): + foo: Union[Bar4, Baz4] + + +class Bar4(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz4(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_union_of_typeddict(use_async: bool) -> None: + assert await transform({"foo": {"foo_bar": "bar"}}, Foo4, use_async) == {"foo": {"fooBar": "bar"}} + assert await transform({"foo": {"foo_baz": "baz"}}, Foo4, use_async) == {"foo": {"fooBaz": "baz"}} + assert await transform({"foo": {"foo_baz": "baz", "foo_bar": "bar"}}, Foo4, use_async) == { + "foo": {"fooBaz": "baz", "fooBar": "bar"} + } + + +class Foo5(TypedDict): + foo: Annotated[Union[Bar4, List[Baz4]], PropertyInfo(alias="FOO")] + + +class Bar5(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz5(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_union_of_list(use_async: bool) -> None: + assert await transform({"foo": {"foo_bar": "bar"}}, Foo5, use_async) == {"FOO": {"fooBar": "bar"}} + assert await transform( + { + "foo": [ + {"foo_baz": "baz"}, + {"foo_baz": "baz"}, + ] + }, + Foo5, + use_async, + ) == {"FOO": [{"fooBaz": "baz"}, {"fooBaz": "baz"}]} + + +class Foo6(TypedDict): + bar: Annotated[str, PropertyInfo(alias="Bar")] + + +@parametrize +@pytest.mark.asyncio +async def test_includes_unknown_keys(use_async: bool) -> None: + assert await transform({"bar": "bar", "baz_": {"FOO": 1}}, Foo6, use_async) == { + "Bar": "bar", + "baz_": {"FOO": 1}, + } + + +class Foo7(TypedDict): + bar: Annotated[List[Bar7], PropertyInfo(alias="bAr")] + foo: Bar7 + + +class Bar7(TypedDict): + foo: str + + +@parametrize +@pytest.mark.asyncio +async def test_ignores_invalid_input(use_async: bool) -> None: + assert await transform({"bar": ""}, Foo7, use_async) == {"bAr": ""} + assert await transform({"foo": ""}, Foo7, use_async) == {"foo": ""} + + +class DatetimeDict(TypedDict, total=False): + foo: Annotated[datetime, PropertyInfo(format="iso8601")] + + bar: Annotated[Optional[datetime], PropertyInfo(format="iso8601")] + + required: Required[Annotated[Optional[datetime], PropertyInfo(format="iso8601")]] + + list_: Required[Annotated[Optional[List[datetime]], PropertyInfo(format="iso8601")]] + + union: Annotated[Union[int, datetime], PropertyInfo(format="iso8601")] + + +class DateDict(TypedDict, total=False): + foo: Annotated[date, PropertyInfo(format="iso8601")] + + +class DatetimeModel(BaseModel): + foo: datetime + + +class DateModel(BaseModel): + foo: Optional[date] + + +@parametrize +@pytest.mark.asyncio +async def test_iso8601_format(use_async: bool) -> None: + dt = 
datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + tz = "Z" if PYDANTIC_V2 else "+00:00" + assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap] + + dt = dt.replace(tzinfo=None) + assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] + assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] + + assert await transform({"foo": None}, DateDict, use_async) == {"foo": None} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=None), Any, use_async) == {"foo": None} # type: ignore + assert await transform({"foo": date.fromisoformat("2023-02-23")}, DateDict, use_async) == {"foo": "2023-02-23"} # type: ignore[comparison-overlap] + assert await transform(DateModel(foo=date.fromisoformat("2023-02-23")), DateDict, use_async) == { + "foo": "2023-02-23" + } # type: ignore[comparison-overlap] + + +@parametrize +@pytest.mark.asyncio +async def test_optional_iso8601_format(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"bar": dt}, DatetimeDict, use_async) == {"bar": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] + + assert await transform({"bar": None}, DatetimeDict, use_async) == {"bar": None} + + +@parametrize +@pytest.mark.asyncio +async def test_required_iso8601_format(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"required": dt}, DatetimeDict, use_async) == { + "required": "2023-02-23T14:16:36.337692+00:00" + } # type: ignore[comparison-overlap] + + assert await transform({"required": None}, DatetimeDict, use_async) == {"required": None} + + +@parametrize +@pytest.mark.asyncio +async def test_union_datetime(use_async: bool) -> None: + dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + assert await transform({"union": dt}, DatetimeDict, use_async) == { # type: ignore[comparison-overlap] + "union": "2023-02-23T14:16:36.337692+00:00" + } + + assert await transform({"union": "foo"}, DatetimeDict, use_async) == {"union": "foo"} + + +@parametrize +@pytest.mark.asyncio +async def test_nested_list_iso6801_format(use_async: bool) -> None: + dt1 = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") + dt2 = parse_datetime("2022-01-15T06:34:23Z") + assert await transform({"list_": [dt1, dt2]}, DatetimeDict, use_async) == { # type: ignore[comparison-overlap] + "list_": ["2023-02-23T14:16:36.337692+00:00", "2022-01-15T06:34:23+00:00"] + } + + +@parametrize +@pytest.mark.asyncio +async def test_datetime_custom_format(use_async: bool) -> None: + dt = parse_datetime("2022-01-15T06:34:23Z") + + result = await transform(dt, Annotated[datetime, PropertyInfo(format="custom", format_template="%H")], use_async) + assert result == "06" # type: ignore[comparison-overlap] + + +class DateDictWithRequiredAlias(TypedDict, total=False): + required_prop: Required[Annotated[date, PropertyInfo(format="iso8601", alias="prop")]] + + +@parametrize +@pytest.mark.asyncio +async def test_datetime_with_alias(use_async: bool) -> None: + assert await transform({"required_prop": None}, DateDictWithRequiredAlias, use_async) == {"prop": 
None} # type: ignore[comparison-overlap] + assert await transform( + {"required_prop": date.fromisoformat("2023-02-23")}, DateDictWithRequiredAlias, use_async + ) == {"prop": "2023-02-23"} # type: ignore[comparison-overlap] + + +class MyModel(BaseModel): + foo: str + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_model_to_dictionary(use_async: bool) -> None: + assert cast(Any, await transform(MyModel(foo="hi!"), Any, use_async)) == {"foo": "hi!"} + assert cast(Any, await transform(MyModel.construct(foo="hi!"), Any, use_async)) == {"foo": "hi!"} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_empty_model(use_async: bool) -> None: + assert cast(Any, await transform(MyModel.construct(), Any, use_async)) == {} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_unknown_field(use_async: bool) -> None: + assert cast(Any, await transform(MyModel.construct(my_untyped_field=True), Any, use_async)) == { + "my_untyped_field": True + } + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_mismatched_types(use_async: bool) -> None: + model = MyModel.construct(foo=True) + if PYDANTIC_V2: + with pytest.warns(UserWarning): + params = await transform(model, Any, use_async) + else: + params = await transform(model, Any, use_async) + assert cast(Any, params) == {"foo": True} + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_mismatched_object_type(use_async: bool) -> None: + model = MyModel.construct(foo=MyModel.construct(hello="world")) + if PYDANTIC_V2: + with pytest.warns(UserWarning): + params = await transform(model, Any, use_async) + else: + params = await transform(model, Any, use_async) + assert cast(Any, params) == {"foo": {"hello": "world"}} + + +class ModelNestedObjects(BaseModel): + nested: MyModel + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_nested_objects(use_async: bool) -> None: + model = ModelNestedObjects.construct(nested={"foo": "stainless"}) + assert isinstance(model.nested, MyModel) + assert cast(Any, await transform(model, Any, use_async)) == {"nested": {"foo": "stainless"}} + + +class ModelWithDefaultField(BaseModel): + foo: str + with_none_default: Union[str, None] = None + with_str_default: str = "foo" + + +@parametrize +@pytest.mark.asyncio +async def test_pydantic_default_field(use_async: bool) -> None: + # should be excluded when defaults are used + model = ModelWithDefaultField.construct() + assert model.with_none_default is None + assert model.with_str_default == "foo" + assert cast(Any, await transform(model, Any, use_async)) == {} + + # should be included when the default value is explicitly given + model = ModelWithDefaultField.construct(with_none_default=None, with_str_default="foo") + assert model.with_none_default is None + assert model.with_str_default == "foo" + assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": None, "with_str_default": "foo"} + + # should be included when a non-default value is explicitly given + model = ModelWithDefaultField.construct(with_none_default="bar", with_str_default="baz") + assert model.with_none_default == "bar" + assert model.with_str_default == "baz" + assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": "bar", "with_str_default": "baz"} + + +class TypedDictIterableUnion(TypedDict): + foo: Annotated[Union[Bar8, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +class Bar8(TypedDict): + foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] + + +class Baz8(TypedDict): + foo_baz: Annotated[str, 
PropertyInfo(alias="fooBaz")] + + +@parametrize +@pytest.mark.asyncio +async def test_iterable_of_dictionaries(use_async: bool) -> None: + assert await transform({"foo": [{"foo_baz": "bar"}]}, TypedDictIterableUnion, use_async) == { + "FOO": [{"fooBaz": "bar"}] + } + assert cast(Any, await transform({"foo": ({"foo_baz": "bar"},)}, TypedDictIterableUnion, use_async)) == { + "FOO": [{"fooBaz": "bar"}] + } + + def my_iter() -> Iterable[Baz8]: + yield {"foo_baz": "hello"} + yield {"foo_baz": "world"} + + assert await transform({"foo": my_iter()}, TypedDictIterableUnion, use_async) == { + "FOO": [{"fooBaz": "hello"}, {"fooBaz": "world"}] + } + + +@parametrize +@pytest.mark.asyncio +async def test_dictionary_items(use_async: bool) -> None: + class DictItems(TypedDict): + foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] + + assert await transform({"foo": {"foo_baz": "bar"}}, Dict[str, DictItems], use_async) == {"foo": {"fooBaz": "bar"}} + + +class TypedDictIterableUnionStr(TypedDict): + foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")] + + +@parametrize +@pytest.mark.asyncio +async def test_iterable_union_str(use_async: bool) -> None: + assert await transform({"foo": "bar"}, TypedDictIterableUnionStr, use_async) == {"FOO": "bar"} + assert cast(Any, await transform(iter([{"foo_baz": "bar"}]), Union[str, Iterable[Baz8]], use_async)) == [ + {"fooBaz": "bar"} + ] + + +class TypedDictBase64Input(TypedDict): + foo: Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")] + + +@parametrize +@pytest.mark.asyncio +async def test_base64_file_input(use_async: bool) -> None: + # strings are left as-is + assert await transform({"foo": "bar"}, TypedDictBase64Input, use_async) == {"foo": "bar"} + + # pathlib.Path is automatically converted to base64 + assert await transform({"foo": SAMPLE_FILE_PATH}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQo=" + } # type: ignore[comparison-overlap] + + # io instances are automatically converted to base64 + assert await transform({"foo": io.StringIO("Hello, world!")}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQ==" + } # type: ignore[comparison-overlap] + assert await transform({"foo": io.BytesIO(b"Hello, world!")}, TypedDictBase64Input, use_async) == { + "foo": "SGVsbG8sIHdvcmxkIQ==" + } # type: ignore[comparison-overlap] + + +@parametrize +@pytest.mark.asyncio +async def test_transform_skipping(use_async: bool) -> None: + # lists of ints are left as-is + data = [1, 2, 3] + assert await transform(data, List[int], use_async) is data + + # iterables of ints are converted to a list + data = iter([1, 2, 3]) + assert await transform(data, Iterable[int], use_async) == [1, 2, 3] + + +@parametrize +@pytest.mark.asyncio +async def test_strips_notgiven(use_async: bool) -> None: + assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"} + assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {} diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py new file mode 100644 index 00000000..6fe8c808 --- /dev/null +++ b/tests/test_utils/test_proxy.py @@ -0,0 +1,34 @@ +import operator +from typing import Any +from typing_extensions import override + +from digitalocean_genai_sdk._utils import LazyProxy + + +class RecursiveLazyProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + return self + + def __call__(self, *_args: Any, **_kwds: Any) -> Any: + raise RuntimeError("This should never be called!") + + +def test_recursive_proxy() -> None: + 
proxy = RecursiveLazyProxy() + assert repr(proxy) == "RecursiveLazyProxy" + assert str(proxy) == "RecursiveLazyProxy" + assert dir(proxy) == [] + assert type(proxy).__name__ == "RecursiveLazyProxy" + assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" + + +def test_isinstance_does_not_error() -> None: + class AlwaysErrorProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + raise RuntimeError("Mocking missing dependency") + + proxy = AlwaysErrorProxy() + assert not isinstance(proxy, dict) + assert isinstance(proxy, LazyProxy) diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py new file mode 100644 index 00000000..72bf3422 --- /dev/null +++ b/tests/test_utils/test_typing.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from typing import Generic, TypeVar, cast + +from digitalocean_genai_sdk._utils import extract_type_var_from_base + +_T = TypeVar("_T") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") + + +class BaseGeneric(Generic[_T]): ... + + +class SubclassGeneric(BaseGeneric[_T]): ... + + +class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ... + + +class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ... + + +class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ... + + +def test_extract_type_var() -> None: + assert ( + extract_type_var_from_base( + BaseGeneric[int], + index=0, + generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), + ) + == int + ) + + +def test_extract_type_var_generic_subclass() -> None: + assert ( + extract_type_var_from_base( + SubclassGeneric[int], + index=0, + generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), + ) + == int + ) + + +def test_extract_type_var_multiple() -> None: + typ = BaseGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) + + +def test_extract_type_var_generic_subclass_multiple() -> None: + typ = SubclassGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) + + +def test_extract_type_var_generic_subclass_different_ordering_multiple() -> None: + typ = SubclassDifferentOrderGenericMultipleTypeArgs[int, str, None] + + generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) + assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int + assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str + assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..e795e2e8 --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import os +import inspect +import traceback +import contextlib +from typing import Any, TypeVar, Iterator, cast +from datetime import date, datetime +from typing_extensions import Literal, get_args, get_origin, 
assert_type + +from digitalocean_genai_sdk._types import Omit, NoneType +from digitalocean_genai_sdk._utils import ( + is_dict, + is_list, + is_list_type, + is_union_type, + extract_type_arg, + is_annotated_type, + is_type_alias_type, +) +from digitalocean_genai_sdk._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from digitalocean_genai_sdk._models import BaseModel + +BaseModelT = TypeVar("BaseModelT", bound=BaseModel) + + +def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool: + for name, field in get_model_fields(model).items(): + field_value = getattr(value, name) + if PYDANTIC_V2: + allow_none = False + else: + # in v1 nullability was structured differently + # https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields + allow_none = getattr(field, "allow_none", False) + + assert_matches_type( + field_outer_type(field), + field_value, + path=[*path, name], + allow_none=allow_none, + ) + + return True + + +# Note: the `path` argument is only used to improve error messages when `--showlocals` is used +def assert_matches_type( + type_: Any, + value: object, + *, + path: list[str], + allow_none: bool = False, +) -> None: + if is_type_alias_type(type_): + type_ = type_.__value__ + + # unwrap `Annotated[T, ...]` -> `T` + if is_annotated_type(type_): + type_ = extract_type_arg(type_, 0) + + if allow_none and value is None: + return + + if type_ is None or type_ is NoneType: + assert value is None + return + + origin = get_origin(type_) or type_ + + if is_list_type(type_): + return _assert_list_type(type_, value) + + if origin == str: + assert isinstance(value, str) + elif origin == int: + assert isinstance(value, int) + elif origin == bool: + assert isinstance(value, bool) + elif origin == float: + assert isinstance(value, float) + elif origin == bytes: + assert isinstance(value, bytes) + elif origin == datetime: + assert isinstance(value, datetime) + elif origin == date: + assert isinstance(value, date) + elif origin == object: + # nothing to do here, the expected type is unknown + pass + elif origin == Literal: + assert value in get_args(type_) + elif origin == dict: + assert is_dict(value) + + args = get_args(type_) + key_type = args[0] + items_type = args[1] + + for key, item in value.items(): + assert_matches_type(key_type, key, path=[*path, ""]) + assert_matches_type(items_type, item, path=[*path, ""]) + elif is_union_type(type_): + variants = get_args(type_) + + try: + none_index = variants.index(type(None)) + except ValueError: + pass + else: + # special case Optional[T] for better error messages + if len(variants) == 2: + if value is None: + # valid + return + + return assert_matches_type(type_=variants[not none_index], value=value, path=path) + + for i, variant in enumerate(variants): + try: + assert_matches_type(variant, value, path=[*path, f"variant {i}"]) + return + except AssertionError: + traceback.print_exc() + continue + + raise AssertionError("Did not match any variants") + elif issubclass(origin, BaseModel): + assert isinstance(value, type_) + assert assert_matches_model(type_, cast(Any, value), path=path) + elif inspect.isclass(origin) and origin.__name__ == "HttpxBinaryResponseContent": + assert value.__class__.__name__ == "HttpxBinaryResponseContent" + else: + assert None, f"Unhandled field type: {type_}" + + +def _assert_list_type(type_: type[object], value: object) -> None: + assert is_list(value) + + inner_type = get_args(type_)[0] + for entry in value: + assert_type(inner_type, entry) # 
type: ignore + + +@contextlib.contextmanager +def update_env(**new_env: str | Omit) -> Iterator[None]: + old = os.environ.copy() + + try: + for name, value in new_env.items(): + if isinstance(value, Omit): + os.environ.pop(name, None) + else: + os.environ[name] = value + + yield None + finally: + os.environ.clear() + os.environ.update(old) From 5a4661c38671575a41fa3fd4971c02749f90a604 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 22:31:37 +0000 Subject: [PATCH 002/200] chore: update SDK settings --- .github/workflows/publish-pypi.yml | 31 +++++++++ .github/workflows/release-doctor.yml | 21 ++++++ .release-please-manifest.json | 3 + .stats.yml | 2 +- CONTRIBUTING.md | 4 +- README.md | 10 +-- bin/check-release-environment | 21 ++++++ pyproject.toml | 6 +- release-please-config.json | 66 +++++++++++++++++++ src/digitalocean_genai_sdk/_files.py | 2 +- src/digitalocean_genai_sdk/_version.py | 2 +- .../resources/assistants.py | 8 +-- src/digitalocean_genai_sdk/resources/audio.py | 8 +-- .../resources/batches.py | 8 +-- .../resources/chat/chat.py | 8 +-- .../resources/chat/completions.py | 8 +-- .../resources/completions.py | 8 +-- .../resources/embeddings.py | 8 +-- src/digitalocean_genai_sdk/resources/files.py | 8 +-- .../fine_tuning/checkpoints/checkpoints.py | 8 +-- .../fine_tuning/checkpoints/permissions.py | 8 +-- .../resources/fine_tuning/fine_tuning.py | 8 +-- .../resources/fine_tuning/jobs/checkpoints.py | 8 +-- .../resources/fine_tuning/jobs/events.py | 8 +-- .../resources/fine_tuning/jobs/jobs.py | 8 +-- .../resources/images.py | 8 +-- .../resources/models.py | 8 +-- .../resources/moderations.py | 8 +-- .../resources/organization/admin_api_keys.py | 8 +-- .../resources/organization/invites.py | 8 +-- .../resources/organization/organization.py | 8 +-- .../organization/projects/api_keys.py | 8 +-- .../organization/projects/projects.py | 8 +-- .../organization/projects/rate_limits.py | 8 +-- .../organization/projects/service_accounts.py | 8 +-- .../resources/organization/projects/users.py | 8 +-- .../resources/organization/usage.py | 8 +-- .../resources/organization/users.py | 8 +-- .../resources/realtime.py | 8 +-- .../resources/responses.py | 8 +-- .../resources/threads/messages.py | 8 +-- .../resources/threads/runs/runs.py | 8 +-- .../resources/threads/runs/steps.py | 8 +-- .../resources/threads/threads.py | 8 +-- .../resources/uploads.py | 8 +-- .../resources/vector_stores/file_batches.py | 8 +-- .../resources/vector_stores/files.py | 8 +-- .../resources/vector_stores/vector_stores.py | 8 +-- 48 files changed, 303 insertions(+), 161 deletions(-) create mode 100644 .github/workflows/publish-pypi.yml create mode 100644 .github/workflows/release-doctor.yml create mode 100644 .release-please-manifest.json create mode 100644 bin/check-release-environment create mode 100644 release-please-config.json diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 00000000..2bc5b4b2 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,31 @@ +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
+# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +name: Publish PyPI +on: + workflow_dispatch: + + release: + types: [published] + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml new file mode 100644 index 00000000..0f23cbc4 --- /dev/null +++ b/.github/workflows/release-doctor.yml @@ -0,0 +1,21 @@ +name: Release Doctor +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + release_doctor: + name: release doctor + runs-on: ubuntu-latest + if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + + steps: + - uses: actions/checkout@v4 + + - name: Check release environment + run: | + bash ./bin/check-release-environment + env: + PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000..c4762802 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.0.1-alpha.0" +} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 84a850f9..b7fa13b2 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 126 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml openapi_spec_hash: 686329a97002025d118dc2367755c18d -config_hash: 39a1554af43cd406e37b5ed5c943649c +config_hash: 837c550e5626bc5bc6bd76ce4ce4ed22 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 79f5523c..66e22ef2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -63,7 +63,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git +$ pip install git+ssh://git@github.com/digitalocean/genai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -121,7 +121,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. 
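As an aside, because the workflow declares a `workflow_dispatch` trigger, it can also be dispatched without leaving the terminal via GitHub's `gh` CLI (assuming you have `gh` installed and sufficient access to the repository):

```sh
# manually trigger the Publish PyPI workflow
gh workflow run publish-pypi.yml --repo digitalocean/genai-python
```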
### Publish manually diff --git a/README.md b/README.md index b9fcd7e8..0adb8999 100644 --- a/README.md +++ b/README.md @@ -15,8 +15,8 @@ The REST API documentation can be found on [help.openai.com](https://help.openai ## Installation ```sh -# install from this staging repo -pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git +# install from the production repo +pip install git+ssh://git@github.com/digitalocean/genai-python.git ``` > [!NOTE] @@ -256,9 +256,9 @@ assistant = response.parse() # get the object that `assistants.list()` would ha print(assistant.first_id) ``` -These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -362,7 +362,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment new file mode 100644 index 00000000..9e89a88a --- /dev/null +++ b/bin/check-release-environment @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +errors=() + +if [ -z "${PYPI_TOKEN}" ]; then + errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") +fi + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" 
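Since `bin/check-release-environment` only tests whether `PYPI_TOKEN` is non-empty, it can be sanity-checked locally before wiring up CI (the token value below is a placeholder):

```sh
# succeeds and prints the ready message
PYPI_TOKEN="pypi-placeholder" bash ./bin/check-release-environment

# exits 1 and reports the missing secret
bash ./bin/check-release-environment
```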
diff --git a/pyproject.toml b/pyproject.toml index 33ffc05d..693c5c28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" -Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" +Homepage = "https://github.com/digitalocean/genai-python" +Repository = "https://github.com/digitalocean/genai-python" [tool.rye] @@ -121,7 +121,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..234b9475 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,66 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "python", + "extra-files": [ + "src/digitalocean_genai_sdk/_version.py" + ] +} \ No newline at end of file diff --git a/src/digitalocean_genai_sdk/_files.py b/src/digitalocean_genai_sdk/_files.py index df28b382..02512281 100644 --- a/src/digitalocean_genai_sdk/_files.py +++ b/src/digitalocean_genai_sdk/_files.py @@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None: if not is_file_content(obj): prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" raise RuntimeError( - f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main#file-uploads" + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/digitalocean/genai-python/tree/main#file-uploads" ) from None diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index 5c4fa53a..bb83d491 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "digitalocean_genai_sdk" -__version__ = "0.0.1-alpha.0" +__version__ = "0.0.1-alpha.0" # x-release-please-version diff --git a/src/digitalocean_genai_sdk/resources/assistants.py b/src/digitalocean_genai_sdk/resources/assistants.py index c6ae36f5..6e31fe9e 100644 --- a/src/digitalocean_genai_sdk/resources/assistants.py +++ b/src/digitalocean_genai_sdk/resources/assistants.py @@ -41,7 +41,7 @@ def with_raw_response(self) -> AssistantsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AssistantsResourceWithRawResponse(self) @@ -50,7 +50,7 @@ def with_streaming_response(self) -> AssistantsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AssistantsResourceWithStreamingResponse(self) @@ -437,7 +437,7 @@ def with_raw_response(self) -> AsyncAssistantsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAssistantsResourceWithRawResponse(self) @@ -446,7 +446,7 @@ def with_streaming_response(self) -> AsyncAssistantsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAssistantsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/audio.py b/src/digitalocean_genai_sdk/resources/audio.py index 7cecbe6d..bf023ba0 100644 --- a/src/digitalocean_genai_sdk/resources/audio.py +++ b/src/digitalocean_genai_sdk/resources/audio.py @@ -45,7 +45,7 @@ def with_raw_response(self) -> AudioResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AudioResourceWithRawResponse(self) @@ -54,7 +54,7 @@ def with_streaming_response(self) -> AudioResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AudioResourceWithStreamingResponse(self) @@ -319,7 +319,7 @@ def with_raw_response(self) -> AsyncAudioResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAudioResourceWithRawResponse(self) @@ -328,7 +328,7 @@ def with_streaming_response(self) -> AsyncAudioResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAudioResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/batches.py b/src/digitalocean_genai_sdk/resources/batches.py index a2b1fedf..bd92c365 100644 --- a/src/digitalocean_genai_sdk/resources/batches.py +++ b/src/digitalocean_genai_sdk/resources/batches.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> BatchesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return BatchesResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> BatchesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return BatchesResourceWithStreamingResponse(self) @@ -240,7 +240,7 @@ def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncBatchesResourceWithRawResponse(self) @@ -249,7 +249,7 @@ def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncBatchesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/chat/chat.py b/src/digitalocean_genai_sdk/resources/chat/chat.py index df1f356c..ac19d849 100644 --- a/src/digitalocean_genai_sdk/resources/chat/chat.py +++ b/src/digitalocean_genai_sdk/resources/chat/chat.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat/completions.py index c0908a57..b89b8f9e 100644 --- a/src/digitalocean_genai_sdk/resources/chat/completions.py +++ b/src/digitalocean_genai_sdk/resources/chat/completions.py @@ -44,7 +44,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -53,7 +53,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -594,7 +594,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -603,7 +603,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/completions.py b/src/digitalocean_genai_sdk/resources/completions.py index ff495166..cde13a53 100644 --- a/src/digitalocean_genai_sdk/resources/completions.py +++ b/src/digitalocean_genai_sdk/resources/completions.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -232,7 +232,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -241,7 +241,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py index 92552f62..7dc90e34 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/digitalocean_genai_sdk/resources/embeddings.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -119,7 +119,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -128,7 +128,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/files.py b/src/digitalocean_genai_sdk/resources/files.py index 65e459f4..a93712fd 100644 --- a/src/digitalocean_genai_sdk/resources/files.py +++ b/src/digitalocean_genai_sdk/resources/files.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> FilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FilesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> FilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FilesResourceWithStreamingResponse(self) @@ -282,7 +282,7 @@ def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFilesResourceWithRawResponse(self) @@ -291,7 +291,7 @@ def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFilesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py index b1a85058..d2bd64ef 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> CheckpointsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return CheckpointsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return CheckpointsResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncCheckpointsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncCheckpointsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py index 0dee4435..39e6a210 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> PermissionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return PermissionsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> PermissionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return PermissionsResourceWithStreamingResponse(self) @@ -189,7 +189,7 @@ def with_raw_response(self) -> AsyncPermissionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncPermissionsResourceWithRawResponse(self) @@ -198,7 +198,7 @@ def with_streaming_response(self) -> AsyncPermissionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncPermissionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py index 8b4956b1..9c097afe 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> FineTuningResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FineTuningResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FineTuningResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFineTuningResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFineTuningResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py index d9ade070..adac27a5 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> CheckpointsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return CheckpointsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return CheckpointsResourceWithStreamingResponse(self) @@ -98,7 +98,7 @@ def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncCheckpointsResourceWithRawResponse(self) @@ -107,7 +107,7 @@ def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncCheckpointsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py index 6005084f..56d64766 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> EventsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return EventsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> EventsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return EventsResourceWithStreamingResponse(self) @@ -98,7 +98,7 @@ def with_raw_response(self) -> AsyncEventsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncEventsResourceWithRawResponse(self) @@ -107,7 +107,7 @@ def with_streaming_response(self) -> AsyncEventsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncEventsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py index 86a7ae4b..09670aa9 100644 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py +++ b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py @@ -57,7 +57,7 @@ def with_raw_response(self) -> JobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return JobsResourceWithRawResponse(self) @@ -66,7 +66,7 @@ def with_streaming_response(self) -> JobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return JobsResourceWithStreamingResponse(self) @@ -318,7 +318,7 @@ def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncJobsResourceWithRawResponse(self) @@ -327,7 +327,7 @@ def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncJobsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/images.py b/src/digitalocean_genai_sdk/resources/images.py index 56a52184..6003c71b 100644 --- a/src/digitalocean_genai_sdk/resources/images.py +++ b/src/digitalocean_genai_sdk/resources/images.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ImagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ImagesResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ImagesResourceWithStreamingResponse(self) @@ -285,7 +285,7 @@ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncImagesResourceWithRawResponse(self) @@ -294,7 +294,7 @@ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncImagesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py index 53775057..9bdebc56 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/digitalocean_genai_sdk/resources/models.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -140,7 +140,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -149,7 +149,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/moderations.py b/src/digitalocean_genai_sdk/resources/moderations.py index e9404243..5979cc29 100644 --- a/src/digitalocean_genai_sdk/resources/moderations.py +++ b/src/digitalocean_genai_sdk/resources/moderations.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ModerationsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ModerationsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ModerationsResourceWithStreamingResponse(self) @@ -109,7 +109,7 @@ def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncModerationsResourceWithRawResponse(self) @@ -118,7 +118,7 @@ def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncModerationsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py index 7224871f..d98c5fb7 100644 --- a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py +++ b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> AdminAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AdminAPIKeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> AdminAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AdminAPIKeysResourceWithStreamingResponse(self) @@ -206,7 +206,7 @@ def with_raw_response(self) -> AsyncAdminAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAdminAPIKeysResourceWithRawResponse(self) @@ -215,7 +215,7 @@ def with_streaming_response(self) -> AsyncAdminAPIKeysResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAdminAPIKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/invites.py b/src/digitalocean_genai_sdk/resources/organization/invites.py index 16bd17bc..392d4308 100644 --- a/src/digitalocean_genai_sdk/resources/organization/invites.py +++ b/src/digitalocean_genai_sdk/resources/organization/invites.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> InvitesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return InvitesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> InvitesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return InvitesResourceWithStreamingResponse(self) @@ -222,7 +222,7 @@ def with_raw_response(self) -> AsyncInvitesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncInvitesResourceWithRawResponse(self) @@ -231,7 +231,7 @@ def with_streaming_response(self) -> AsyncInvitesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncInvitesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/organization.py b/src/digitalocean_genai_sdk/resources/organization/organization.py index 4a9aa4fb..abb1d7c8 100644 --- a/src/digitalocean_genai_sdk/resources/organization/organization.py +++ b/src/digitalocean_genai_sdk/resources/organization/organization.py @@ -93,7 +93,7 @@ def with_raw_response(self) -> OrganizationResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return OrganizationResourceWithRawResponse(self) @@ -102,7 +102,7 @@ def with_streaming_response(self) -> OrganizationResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return OrganizationResourceWithStreamingResponse(self) @@ -289,7 +289,7 @@ def with_raw_response(self) -> AsyncOrganizationResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncOrganizationResourceWithRawResponse(self) @@ -298,7 +298,7 @@ def with_streaming_response(self) -> AsyncOrganizationResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncOrganizationResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py index c5907765..4369beeb 100644 --- a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py +++ b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -176,7 +176,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -185,7 +185,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py index 93e42de8..942c8773 100644 --- a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py +++ b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py @@ -77,7 +77,7 @@ def with_raw_response(self) -> ProjectsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ProjectsResourceWithRawResponse(self) @@ -86,7 +86,7 @@ def with_streaming_response(self) -> ProjectsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ProjectsResourceWithStreamingResponse(self) @@ -311,7 +311,7 @@ def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncProjectsResourceWithRawResponse(self) @@ -320,7 +320,7 @@ def with_streaming_response(self) -> AsyncProjectsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncProjectsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py index 9c9dce7b..b15ef7ec 100644 --- a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py +++ b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> RateLimitsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RateLimitsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> RateLimitsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RateLimitsResourceWithStreamingResponse(self) @@ -174,7 +174,7 @@ def with_raw_response(self) -> AsyncRateLimitsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRateLimitsResourceWithRawResponse(self) @@ -183,7 +183,7 @@ def with_streaming_response(self) -> AsyncRateLimitsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRateLimitsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py index 8957a81d..d40c841f 100644 --- a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py +++ b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ServiceAccountsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ServiceAccountsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ServiceAccountsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ServiceAccountsResourceWithStreamingResponse(self) @@ -216,7 +216,7 @@ def with_raw_response(self) -> AsyncServiceAccountsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncServiceAccountsResourceWithRawResponse(self) @@ -225,7 +225,7 @@ def with_streaming_response(self) -> AsyncServiceAccountsResourceWithStreamingRe """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncServiceAccountsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/users.py b/src/digitalocean_genai_sdk/resources/organization/projects/users.py index e35ff0cf..6e40263a 100644 --- a/src/digitalocean_genai_sdk/resources/organization/projects/users.py +++ b/src/digitalocean_genai_sdk/resources/organization/projects/users.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> UsersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return UsersResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> UsersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return UsersResourceWithStreamingResponse(self) @@ -266,7 +266,7 @@ def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncUsersResourceWithRawResponse(self) @@ -275,7 +275,7 @@ def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncUsersResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/usage.py b/src/digitalocean_genai_sdk/resources/organization/usage.py index 37d11956..2eae3b13 100644 --- a/src/digitalocean_genai_sdk/resources/organization/usage.py +++ b/src/digitalocean_genai_sdk/resources/organization/usage.py @@ -40,7 +40,7 @@ def with_raw_response(self) -> UsageResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return UsageResourceWithRawResponse(self) @@ -49,7 +49,7 @@ def with_streaming_response(self) -> UsageResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return UsageResourceWithStreamingResponse(self) @@ -735,7 +735,7 @@ def with_raw_response(self) -> AsyncUsageResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncUsageResourceWithRawResponse(self) @@ -744,7 +744,7 @@ def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncUsageResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/organization/users.py b/src/digitalocean_genai_sdk/resources/organization/users.py index 536e4396..44fb2f5a 100644 --- a/src/digitalocean_genai_sdk/resources/organization/users.py +++ b/src/digitalocean_genai_sdk/resources/organization/users.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> UsersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return UsersResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> UsersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return UsersResourceWithStreamingResponse(self) @@ -211,7 +211,7 @@ def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncUsersResourceWithRawResponse(self) @@ -220,7 +220,7 @@ def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncUsersResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/realtime.py b/src/digitalocean_genai_sdk/resources/realtime.py index 4c70a798..bfa61beb 100644 --- a/src/digitalocean_genai_sdk/resources/realtime.py +++ b/src/digitalocean_genai_sdk/resources/realtime.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> RealtimeResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RealtimeResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> RealtimeResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RealtimeResourceWithStreamingResponse(self) @@ -283,7 +283,7 @@ def with_raw_response(self) -> AsyncRealtimeResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRealtimeResourceWithRawResponse(self) @@ -292,7 +292,7 @@ def with_streaming_response(self) -> AsyncRealtimeResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRealtimeResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/responses.py b/src/digitalocean_genai_sdk/resources/responses.py index 03445cdc..1890002e 100644 --- a/src/digitalocean_genai_sdk/resources/responses.py +++ b/src/digitalocean_genai_sdk/resources/responses.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> ResponsesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ResponsesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ResponsesResourceWithStreamingResponse(self) @@ -434,7 +434,7 @@ def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncResponsesResourceWithRawResponse(self) @@ -443,7 +443,7 @@ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncResponsesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/threads/messages.py b/src/digitalocean_genai_sdk/resources/threads/messages.py index e62eb94c..c4f75672 100644 --- a/src/digitalocean_genai_sdk/resources/threads/messages.py +++ b/src/digitalocean_genai_sdk/resources/threads/messages.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> MessagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return MessagesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> MessagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return MessagesResourceWithStreamingResponse(self) @@ -305,7 +305,7 @@ def with_raw_response(self) -> AsyncMessagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncMessagesResourceWithRawResponse(self) @@ -314,7 +314,7 @@ def with_streaming_response(self) -> AsyncMessagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncMessagesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py index a270b7a9..4e1a9dc8 100644 --- a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py +++ b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py @@ -58,7 +58,7 @@ def with_raw_response(self) -> RunsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RunsResourceWithRawResponse(self) @@ -67,7 +67,7 @@ def with_streaming_response(self) -> RunsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RunsResourceWithStreamingResponse(self) @@ -686,7 +686,7 @@ def with_raw_response(self) -> AsyncRunsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRunsResourceWithRawResponse(self) @@ -695,7 +695,7 @@ def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRunsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py index 2b5ffd09..bf0b8749 100644 --- a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py +++ b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> StepsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return StepsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> StepsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return StepsResourceWithStreamingResponse(self) @@ -183,7 +183,7 @@ def with_raw_response(self) -> AsyncStepsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncStepsResourceWithRawResponse(self) @@ -192,7 +192,7 @@ def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncStepsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/threads/threads.py b/src/digitalocean_genai_sdk/resources/threads/threads.py index 64062ffb..5fdf5ea8 100644 --- a/src/digitalocean_genai_sdk/resources/threads/threads.py +++ b/src/digitalocean_genai_sdk/resources/threads/threads.py @@ -56,7 +56,7 @@ def with_raw_response(self) -> ThreadsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ThreadsResourceWithRawResponse(self) @@ -65,7 +65,7 @@ def with_streaming_response(self) -> ThreadsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ThreadsResourceWithStreamingResponse(self) @@ -260,7 +260,7 @@ def with_raw_response(self) -> AsyncThreadsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncThreadsResourceWithRawResponse(self) @@ -269,7 +269,7 @@ def with_streaming_response(self) -> AsyncThreadsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncThreadsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/uploads.py b/src/digitalocean_genai_sdk/resources/uploads.py index 30ba91b5..d36ad23e 100644 --- a/src/digitalocean_genai_sdk/resources/uploads.py +++ b/src/digitalocean_genai_sdk/resources/uploads.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> UploadsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return UploadsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> UploadsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return UploadsResourceWithStreamingResponse(self) @@ -270,7 +270,7 @@ def with_raw_response(self) -> AsyncUploadsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncUploadsResourceWithRawResponse(self) @@ -279,7 +279,7 @@ def with_streaming_response(self) -> AsyncUploadsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncUploadsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py index 0c4334ce..d6dba5f9 100644 --- a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py +++ b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> FileBatchesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FileBatchesResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FileBatchesResourceWithStreamingResponse(self) @@ -256,7 +256,7 @@ def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFileBatchesResourceWithRawResponse(self) @@ -265,7 +265,7 @@ def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFileBatchesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/files.py b/src/digitalocean_genai_sdk/resources/vector_stores/files.py index c40d5b11..84576820 100644 --- a/src/digitalocean_genai_sdk/resources/vector_stores/files.py +++ b/src/digitalocean_genai_sdk/resources/vector_stores/files.py @@ -40,7 +40,7 @@ def with_raw_response(self) -> FilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FilesResourceWithRawResponse(self) @@ -49,7 +49,7 @@ def with_streaming_response(self) -> FilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FilesResourceWithStreamingResponse(self) @@ -342,7 +342,7 @@ def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFilesResourceWithRawResponse(self) @@ -351,7 +351,7 @@ def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFilesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py index 8ad572ea..0e80da49 100644 --- a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py +++ b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py @@ -64,7 +64,7 @@ def with_raw_response(self) -> VectorStoresResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return VectorStoresResourceWithRawResponse(self) @@ -73,7 +73,7 @@ def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return VectorStoresResourceWithStreamingResponse(self) @@ -399,7 +399,7 @@ def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncVectorStoresResourceWithRawResponse(self) @@ -408,7 +408,7 @@ def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncVectorStoresResourceWithStreamingResponse(self) From d2a983b3fb2008628b6e62343488b575b40cbfc2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 22:32:38 +0000 Subject: [PATCH 003/200] chore: update SDK settings --- .stats.yml | 2 +- README.md | 9 +++------ pyproject.toml | 2 +- requirements-dev.lock | 12 ++++++------ requirements.lock | 12 ++++++------ 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index b7fa13b2..8f90d8ba 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 126 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml openapi_spec_hash: 686329a97002025d118dc2367755c18d -config_hash: 837c550e5626bc5bc6bd76ce4ce4ed22 +config_hash: 2da74b81015f4ef6cad3a0bcb9025834 diff --git a/README.md b/README.md index 0adb8999..1fc05afc 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Digitalocean Genai SDK Python API library -[![PyPI version](https://img.shields.io/pypi/v/digitalocean_genai_sdk.svg)](https://pypi.org/project/digitalocean_genai_sdk/) +[![PyPI version](https://img.shields.io/pypi/v/do-genai.svg)](https://pypi.org/project/do-genai/) The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, @@ -15,13 +15,10 @@ The REST API documentation can be found on [help.openai.com](https://help.openai ## Installation ```sh -# install from the production repo -pip install git+ssh://git@github.com/digitalocean/genai-python.git +# install from PyPI +pip install --pre do-genai ``` -> [!NOTE] -> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk` - ## Usage The full API of this library can be found in [api.md](api.md). 
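For reference, the `#accessing-raw-response-data-eg-headers` and `#with_streaming_response` anchors that the retargeted links above point to document the same two accessors on every resource: `.with_raw_response` returns the raw HTTP response instead of the parsed content, and `.with_streaming_response` defers reading the body. A minimal sketch of both, assuming the usual Stainless client conventions (a `.parse()` method on raw responses, lazy `iter_bytes()` on streaming ones) and using `models.list` purely as an illustrative endpoint:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

# Prefixing a method call with `.with_raw_response` returns the raw HTTP
# response, so headers can be inspected before (or instead of) parsing.
raw = client.models.with_raw_response.list()
print(raw.headers.get("content-type"))
models = raw.parse()  # parse into the typed response model on demand

# `.with_streaming_response` does not eagerly read the response body;
# it is consumed lazily inside the context manager.
with client.models.with_streaming_response.list() as streamed:
    for chunk in streamed.iter_bytes():
        ...  # process the body incrementally
```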
diff --git a/pyproject.toml b/pyproject.toml index 693c5c28..a268c8cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "digitalocean_genai_sdk" +name = "do-genai" version = "0.0.1-alpha.0" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index bf449af3..8a2680e6 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,7 +13,7 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via digitalocean-genai-sdk + # via do-genai # via httpx argcomplete==3.1.2 # via nox @@ -26,7 +26,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via digitalocean-genai-sdk + # via do-genai exceptiongroup==1.2.2 # via anyio # via pytest @@ -37,7 +37,7 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via digitalocean-genai-sdk + # via do-genai # via respx idna==3.4 # via anyio @@ -64,7 +64,7 @@ platformdirs==3.11.0 pluggy==1.5.0 # via pytest pydantic==2.10.3 - # via digitalocean-genai-sdk + # via do-genai pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -86,14 +86,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via digitalocean-genai-sdk + # via do-genai time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via digitalocean-genai-sdk + # via do-genai # via mypy # via pydantic # via pydantic-core diff --git a/requirements.lock b/requirements.lock index e655776d..832a9acd 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,13 +13,13 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via digitalocean-genai-sdk + # via do-genai # via httpx certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via digitalocean-genai-sdk + # via do-genai exceptiongroup==1.2.2 # via anyio h11==0.14.0 @@ -27,19 +27,19 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via digitalocean-genai-sdk + # via do-genai idna==3.4 # via anyio # via httpx pydantic==2.10.3 - # via digitalocean-genai-sdk + # via do-genai pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via digitalocean-genai-sdk + # via do-genai typing-extensions==4.12.2 # via anyio - # via digitalocean-genai-sdk + # via do-genai # via pydantic # via pydantic-core From 7fd870df97a4b83dee7d629c45a47ac1079b98b8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 02:29:15 +0000 Subject: [PATCH 004/200] chore(docs): remove reference to rye shell --- CONTRIBUTING.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66e22ef2..7d5d60a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,8 +17,7 @@ $ rye sync --all-features You can then run scripts using `rye run python script.py` or by activating the virtual environment: ```sh -$ rye shell -# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work $ source .venv/bin/activate # now you can omit the `rye run` prefix From 1d6cf7409de57e388c0d828f225722acea01b2ea Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 02:41:43 +0000 Subject: [PATCH 005/200] chore(docs): remove unnecessary param examples --- README.md | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff 
--git a/README.md b/README.md index 1fc05afc..b556b556 100644 --- a/README.md +++ b/README.md @@ -88,19 +88,7 @@ client = DigitaloceanGenaiSDK() assistant_object = client.assistants.create( model="gpt-4o", - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, + tool_resources={}, ) print(assistant_object.tool_resources) ``` From b4993eedcc2a25afaf6072fd68678531171b7a60 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 03:45:23 +0000 Subject: [PATCH 006/200] feat(client): add follow_redirects request option --- src/digitalocean_genai_sdk/_base_client.py | 6 +++ src/digitalocean_genai_sdk/_models.py | 2 + src/digitalocean_genai_sdk/_types.py | 2 + tests/test_client.py | 54 ++++++++++++++++++++++ 4 files changed, 64 insertions(+) diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/digitalocean_genai_sdk/_base_client.py index 9f58b2f9..73cd30fc 100644 --- a/src/digitalocean_genai_sdk/_base_client.py +++ b/src/digitalocean_genai_sdk/_base_client.py @@ -960,6 +960,9 @@ def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1460,6 +1463,9 @@ async def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None diff --git a/src/digitalocean_genai_sdk/_models.py b/src/digitalocean_genai_sdk/_models.py index 798956f1..4f214980 100644 --- a/src/digitalocean_genai_sdk/_models.py +++ b/src/digitalocean_genai_sdk/_models.py @@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False): idempotency_key: str json_data: Body extra_json: AnyMapping + follow_redirects: bool @final @@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel): files: Union[HttpxRequestFiles, None] = None idempotency_key: Union[str, None] = None post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + follow_redirects: Union[bool, None] = None # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. 
diff --git a/src/digitalocean_genai_sdk/_types.py b/src/digitalocean_genai_sdk/_types.py index b2bfbbec..3c0d156e 100644 --- a/src/digitalocean_genai_sdk/_types.py +++ b/src/digitalocean_genai_sdk/_types.py @@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False): params: Query extra_json: AnyMapping idempotency_key: str + follow_redirects: bool # Sentinel class used until PEP 0661 is accepted @@ -215,3 +216,4 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth + follow_redirects: bool diff --git a/tests/test_client.py b/tests/test_client.py index c13403e3..f24c8c5f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -821,6 +821,33 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + class TestAsyncDigitaloceanGenaiSDK: client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) @@ -1638,3 +1665,30 @@ async def test_main() -> None: raise AssertionError("calling get_platform using asyncify resulted in a hung process") time.sleep(0.1) + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + await self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" 
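The tests above pin down the semantics of the new option: redirects are followed by default, and passing `options={"follow_redirects": False}` surfaces the 3xx response as an `APIStatusError` rather than following it. A minimal usage sketch mirroring those tests (the `/redirect` path is a mocked fixture, not a real API endpoint):

```python
import httpx

from digitalocean_genai_sdk import APIStatusError, DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

try:
    # With redirect-following disabled, a 302 is raised rather than followed.
    client.post(
        "/redirect",
        body={"key": "value"},
        options={"follow_redirects": False},
        cast_to=httpx.Response,
    )
except APIStatusError as err:
    print(err.response.status_code)          # 302
    print(err.response.headers["Location"])  # the redirect target
```

The async client accepts the option identically, as the paired async tests verify.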
From 3a0f0d213a448a6f21fe91cf6c4e2610ad5749af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 22:33:04 +0000 Subject: [PATCH 007/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- README.md | 108 +- api.md | 572 +----- src/digitalocean_genai_sdk/_client.py | 131 +- src/digitalocean_genai_sdk/_files.py | 2 +- .../resources/__init__.py | 196 --- .../resources/assistants.py | 910 ---------- src/digitalocean_genai_sdk/resources/audio.py | 650 ------- .../resources/batches.py | 513 ------ .../resources/chat/completions.py | 906 +--------- .../resources/completions.py | 460 ----- .../resources/embeddings.py | 65 +- src/digitalocean_genai_sdk/resources/files.py | 608 ------- .../resources/fine_tuning/__init__.py | 47 - .../fine_tuning/checkpoints/__init__.py | 33 - .../fine_tuning/checkpoints/checkpoints.py | 102 -- .../fine_tuning/checkpoints/permissions.py | 401 ----- .../resources/fine_tuning/fine_tuning.py | 134 -- .../resources/fine_tuning/jobs/__init__.py | 47 - .../resources/fine_tuning/jobs/checkpoints.py | 197 --- .../resources/fine_tuning/jobs/events.py | 197 --- .../resources/fine_tuning/jobs/jobs.py | 668 ------- .../resources/images.py | 592 ------- .../resources/models.py | 83 - .../resources/moderations.py | 216 --- .../resources/organization/__init__.py | 89 - .../resources/organization/admin_api_keys.py | 444 ----- .../resources/organization/invites.py | 476 ----- .../resources/organization/organization.py | 586 ------- .../organization/projects/__init__.py | 75 - .../organization/projects/api_keys.py | 375 ---- .../organization/projects/projects.py | 670 ------- .../organization/projects/rate_limits.py | 360 ---- .../organization/projects/service_accounts.py | 466 ----- .../resources/organization/projects/users.py | 577 ------ .../resources/organization/usage.py | 1543 ----------------- .../resources/organization/users.py | 454 ----- .../resources/realtime.py | 574 ------ .../resources/responses.py | 902 ---------- .../resources/threads/__init__.py | 47 - .../resources/threads/messages.py | 654 ------- .../resources/threads/runs/__init__.py | 33 - .../resources/threads/runs/runs.py | 1427 --------------- .../resources/threads/runs/steps.py | 375 ---- .../resources/threads/threads.py | 553 ------ .../resources/uploads.py | 573 ------ .../resources/vector_stores/__init__.py | 47 - .../resources/vector_stores/file_batches.py | 544 ------ .../resources/vector_stores/files.py | 733 -------- .../resources/vector_stores/vector_stores.py | 847 --------- src/digitalocean_genai_sdk/types/__init__.py | 126 -- .../types/assistant_create_params.py | 211 --- .../types/assistant_delete_response.py | 15 - .../types/assistant_list_params.py | 39 - .../types/assistant_list_response.py | 20 - .../types/assistant_object.py | 133 -- .../types/assistant_supported_models.py | 38 - .../types/assistant_tools_code.py | 12 - .../types/assistant_tools_code_param.py | 12 - .../types/assistant_tools_file_search.py | 56 - .../assistant_tools_file_search_param.py | 56 - .../types/assistant_tools_function.py | 15 - .../types/assistant_tools_function_param.py | 16 - .../types/assistant_update_params.py | 137 -- .../assistants_api_response_format_option.py | 14 - ...stants_api_response_format_option_param.py | 16 - .../types/audio_generate_speech_params.py | 47 - .../types/audio_transcribe_audio_params.py | 87 - .../types/audio_transcribe_audio_response.py | 69 - .../types/audio_translate_audio_params.py | 47 - 
.../types/audio_translate_audio_response.py | 30 - .../types/audit_log_actor_user.py | 15 - .../types/audit_log_event_type.py | 30 - .../auto_chunking_strategy_request_param.py | 12 - src/digitalocean_genai_sdk/types/batch.py | 109 -- .../types/batch_create_params.py | 46 - .../types/batch_list_params.py | 24 - .../types/batch_list_response.py | 21 - .../types/chat/__init__.py | 18 - .../types/chat/completion_create_params.py | 510 +----- .../types/chat/completion_delete_response.py | 18 - .../chat/completion_list_messages_params.py | 21 - .../chat/completion_list_messages_response.py | 31 - .../types/chat/completion_list_params.py | 31 - .../types/chat/completion_list_response.py | 26 - .../types/chat/completion_update_params.py | 20 - .../types/chat/create_response.py | 18 +- .../types/chat/message_tool_call.py | 31 - .../types/chat/message_tool_call_param.py | 31 - .../types/chat/model_ids_shared_param.py | 57 - .../types/chat/response_format_json_object.py | 12 - .../chat/response_format_json_object_param.py | 12 - .../types/chat/response_format_json_schema.py | 48 - .../chat/response_format_json_schema_param.py | 46 - .../types/chat/response_format_text.py | 12 - .../types/chat/response_format_text_param.py | 12 - .../types/chat/response_message.py | 82 +- .../types/chat/usage.py | 40 +- .../types/chat/web_search_context_size.py | 7 - .../types/chat/web_search_location.py | 27 - .../types/chat/web_search_location_param.py | 27 - .../types/comparison_filter.py | 30 - .../types/comparison_filter_param.py | 30 - .../types/completion_create_params.py | 168 -- .../types/completion_create_response.py | 63 - .../types/compound_filter.py | 22 - .../types/compound_filter_param.py | 23 - .../types/computer_tool_call.py | 198 --- .../types/computer_tool_call_output.py | 50 - .../types/computer_tool_call_output_param.py | 51 - .../types/computer_tool_call_param.py | 199 --- .../types/computer_tool_call_safety_check.py | 16 - .../computer_tool_call_safety_check_param.py | 18 - .../types/create_thread_request_param.py | 130 -- .../types/embedding_create_params.py | 36 +- .../types/embedding_create_response.py | 6 +- .../types/file_delete_response.py | 15 - .../types/file_list_params.py | 33 - .../types/file_list_response.py | 20 - .../types/file_retrieve_content_response.py | 7 - .../types/file_search_ranker.py | 7 - .../types/file_search_tool_call.py | 51 - .../types/file_search_tool_call_param.py | 51 - .../types/file_upload_params.py | 23 - .../types/fine_tuning/__init__.py | 7 - .../types/fine_tuning/checkpoints/__init__.py | 7 - .../list_fine_tuning_checkpoint_permission.py | 34 - .../checkpoints/permission_create_params.py | 13 - .../checkpoints/permission_delete_response.py | 18 - .../checkpoints/permission_retrieve_params.py | 21 - .../types/fine_tuning/fine_tune_method.py | 78 - .../fine_tuning/fine_tune_method_param.py | 78 - .../types/fine_tuning/fine_tuning_job.py | 182 -- .../types/fine_tuning/job_create_params.py | 152 -- .../types/fine_tuning/job_list_params.py | 23 - .../types/fine_tuning/job_list_response.py | 17 - .../types/fine_tuning/jobs/__init__.py | 5 - .../jobs/checkpoint_retrieve_params.py | 15 - .../jobs/checkpoint_retrieve_response.py | 59 - .../fine_tuning/jobs/event_retrieve_params.py | 15 - .../jobs/event_retrieve_response.py | 40 - .../types/function_object.py | 41 - .../types/function_object_param.py | 42 - .../types/function_tool_call.py | 32 - .../types/function_tool_call_output.py | 32 - .../types/function_tool_call_output_param.py | 31 - 
.../types/function_tool_call_param.py | 31 - .../types/image_create_edit_params.py | 60 - .../types/image_create_generation_params.py | 62 - .../types/image_create_variation_params.py | 49 - .../types/images_response.py | 30 - .../types/includable.py | 9 - .../types/input_content.py | 53 - .../types/input_content_param.py | 53 - .../types/input_message.py | 30 - .../types/input_message_param.py | 31 - .../types/model_delete_response.py | 13 - .../types/model_response_properties.py | 42 - .../types/moderation_classify_params.py | 59 - .../types/moderation_classify_response.py | 203 --- .../types/openai_file.py | 51 - .../types/organization/__init__.py | 31 - .../types/organization/admin_api_key.py | 35 - .../admin_api_key_create_params.py | 11 - .../admin_api_key_delete_response.py | 15 - .../organization/admin_api_key_list_params.py | 19 - .../admin_api_key_list_response.py | 20 - .../types/organization/invite.py | 45 - .../organization/invite_create_params.py | 31 - .../organization/invite_delete_response.py | 16 - .../types/organization/invite_list_params.py | 24 - .../organization/invite_list_response.py | 28 - .../types/organization/organization_user.py | 27 - .../types/organization/project.py | 28 - .../organization/project_create_params.py | 12 - .../types/organization/project_list_params.py | 30 - .../organization/project_list_response.py | 21 - .../organization/project_update_params.py | 12 - .../types/organization/projects/__init__.py | 21 - .../types/organization/projects/api_key.py | 40 - .../projects/api_key_delete_response.py | 15 - .../projects/api_key_list_params.py | 24 - .../projects/api_key_list_response.py | 21 - .../organization/projects/project_user.py | 27 - .../types/organization/projects/rate_limit.py | 37 - .../projects/rate_limit_list_params.py | 30 - .../projects/rate_limit_list_response.py | 21 - .../projects/rate_limit_update_params.py | 29 - .../organization/projects/service_account.py | 24 - .../projects/service_account_create_params.py | 12 - .../service_account_create_response.py | 35 - .../service_account_delete_response.py | 15 - .../projects/service_account_list_params.py | 24 - .../projects/service_account_list_response.py | 21 - .../organization/projects/user_add_params.py | 15 - .../projects/user_delete_response.py | 15 - .../organization/projects/user_list_params.py | 24 - .../projects/user_list_response.py | 20 - .../projects/user_update_params.py | 14 - .../usage_audio_speeches_params.py | 55 - .../usage_audio_transcriptions_params.py | 55 - .../usage_code_interpreter_sessions_params.py | 45 - .../organization/usage_completions_params.py | 61 - .../organization/usage_embeddings_params.py | 55 - .../types/organization/usage_images_params.py | 69 - .../organization/usage_moderations_params.py | 55 - .../usage_vector_stores_params.py | 45 - .../organization/user_delete_response.py | 15 - .../types/organization/user_list_params.py | 28 - .../types/organization/user_list_response.py | 21 - .../types/organization/user_update_params.py | 12 - .../types/organization_get_costs_params.py | 43 - .../organization_list_audit_logs_params.py | 87 - .../organization_list_audit_logs_response.py | 433 ----- .../types/output_message.py | 104 -- .../types/output_message_param.py | 104 -- .../types/realtime_create_session_params.py | 230 --- .../types/realtime_create_session_response.py | 151 -- ...ime_create_transcription_session_params.py | 149 -- ...e_create_transcription_session_response.py | 100 -- .../types/reasoning_effort.py | 8 - .../types/reasoning_item.py | 
36 - .../types/reasoning_item_param.py | 36 - src/digitalocean_genai_sdk/types/response.py | 142 -- .../types/response_create_params.py | 494 ------ .../types/response_list_input_items_params.py | 28 - .../response_list_input_items_response.py | 76 - .../types/response_properties.py | 362 ---- .../types/response_retrieve_params.py | 26 - .../types/static_chunking_strategy.py | 20 - .../types/static_chunking_strategy_param.py | 22 - .../static_chunking_strategy_request_param.py | 16 - .../types/thread_create_params.py | 130 -- .../types/thread_delete_response.py | 15 - .../types/thread_object.py | 60 - .../types/thread_update_params.py | 51 - .../types/threads/__init__.py | 33 - .../assistant_tools_file_search_type_only.py | 12 - ...stant_tools_file_search_type_only_param.py | 12 - .../assistants_api_tool_choice_option.py | 23 - ...assistants_api_tool_choice_option_param.py | 23 - .../threads/create_message_request_param.py | 71 - .../message_content_image_file_object.py | 30 - ...message_content_image_file_object_param.py | 29 - .../message_content_image_url_object.py | 30 - .../message_content_image_url_object_param.py | 29 - .../types/threads/message_create_params.py | 71 - .../types/threads/message_delete_response.py | 15 - .../types/threads/message_list_params.py | 42 - .../types/threads/message_list_response.py | 20 - .../types/threads/message_object.py | 179 -- .../types/threads/message_update_params.py | 22 - .../types/threads/run_create_params.py | 215 --- .../types/threads/run_create_run_params.py | 178 -- .../types/threads/run_list_params.py | 39 - .../types/threads/run_list_response.py | 20 - .../types/threads/run_object.py | 265 --- .../threads/run_submit_tool_outputs_params.py | 33 - .../types/threads/run_update_params.py | 22 - .../types/threads/runs/__init__.py | 5 - .../types/threads/runs/run_step_object.py | 323 ---- .../types/threads/runs/step_list_params.py | 54 - .../types/threads/runs/step_list_response.py | 20 - .../threads/runs/step_retrieve_params.py | 26 - .../types/threads/truncation_object.py | 25 - .../types/threads/truncation_object_param.py | 25 - .../types/transcription_segment.py | 49 - src/digitalocean_genai_sdk/types/upload.py | 42 - .../types/upload_add_part_params.py | 14 - .../types/upload_add_part_response.py | 21 - .../types/upload_complete_params.py | 19 - .../types/upload_create_params.py | 29 - .../types/usage_response.py | 352 ---- .../types/vector_store_create_params.py | 47 - .../types/vector_store_delete_response.py | 15 - .../types/vector_store_expiration_after.py | 18 - .../vector_store_expiration_after_param.py | 18 - .../types/vector_store_list_params.py | 39 - .../types/vector_store_list_response.py | 20 - .../types/vector_store_object.py | 71 - .../types/vector_store_search_params.py | 40 - .../types/vector_store_search_response.py | 55 - .../types/vector_store_update_params.py | 28 - .../types/vector_stores/__init__.py | 12 - .../chunking_strategy_request_param.py | 13 - .../vector_stores/file_batch_create_params.py | 34 - .../file_batch_list_files_params.py | 47 - .../types/vector_stores/file_create_params.py | 33 - .../vector_stores/file_delete_response.py | 15 - .../types/vector_stores/file_list_params.py | 45 - .../file_retrieve_content_response.py | 30 - .../types/vector_stores/file_update_params.py | 21 - .../list_vector_store_files_response.py | 20 - .../vector_store_file_batch_object.py | 52 - .../vector_stores/vector_store_file_object.py | 88 - .../types/voice_ids_shared.py | 10 - .../types/voice_ids_shared_param.py | 12 - 
.../types/web_search_tool_call.py | 18 - .../types/web_search_tool_call_param.py | 18 - tests/api_resources/chat/test_completions.py | 585 +------ tests/api_resources/fine_tuning/__init__.py | 1 - .../fine_tuning/checkpoints/__init__.py | 1 - .../checkpoints/test_permissions.py | 309 ---- .../fine_tuning/jobs/__init__.py | 1 - .../fine_tuning/jobs/test_checkpoints.py | 126 -- .../fine_tuning/jobs/test_events.py | 126 -- tests/api_resources/fine_tuning/test_jobs.py | 437 ----- tests/api_resources/organization/__init__.py | 1 - .../organization/projects/__init__.py | 1 - .../organization/projects/test_api_keys.py | 338 ---- .../organization/projects/test_rate_limits.py | 265 --- .../projects/test_service_accounts.py | 431 ----- .../organization/projects/test_users.py | 552 ------ .../organization/test_admin_api_keys.py | 338 ---- .../organization/test_invites.py | 372 ---- .../organization/test_projects.py | 429 ----- .../api_resources/organization/test_usage.py | 834 --------- .../api_resources/organization/test_users.py | 362 ---- tests/api_resources/test_assistants.py | 528 ------ tests/api_resources/test_audio.py | 383 ---- tests/api_resources/test_batches.py | 366 ---- tests/api_resources/test_completions.py | 148 -- tests/api_resources/test_embeddings.py | 4 - tests/api_resources/test_files.py | 430 ----- tests/api_resources/test_images.py | 320 ---- tests/api_resources/test_models.py | 98 +- tests/api_resources/test_moderations.py | 108 -- tests/api_resources/test_organization.py | 219 --- tests/api_resources/test_realtime.py | 269 --- tests/api_resources/test_responses.py | 479 ----- tests/api_resources/test_threads.py | 424 ----- tests/api_resources/test_uploads.py | 399 ----- tests/api_resources/test_vector_stores.py | 603 ------- tests/api_resources/threads/__init__.py | 1 - tests/api_resources/threads/runs/__init__.py | 1 - .../api_resources/threads/runs/test_steps.py | 307 ---- tests/api_resources/threads/test_messages.py | 602 ------- tests/api_resources/threads/test_runs.py | 967 ----------- tests/api_resources/vector_stores/__init__.py | 1 - .../vector_stores/test_file_batches.py | 479 ----- .../api_resources/vector_stores/test_files.py | 677 -------- tests/test_client.py | 205 ++- 342 files changed, 342 insertions(+), 47572 deletions(-) delete mode 100644 src/digitalocean_genai_sdk/resources/assistants.py delete mode 100644 src/digitalocean_genai_sdk/resources/audio.py delete mode 100644 src/digitalocean_genai_sdk/resources/batches.py delete mode 100644 src/digitalocean_genai_sdk/resources/completions.py delete mode 100644 src/digitalocean_genai_sdk/resources/files.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py delete mode 100644 src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py delete mode 100644 src/digitalocean_genai_sdk/resources/images.py delete mode 100644 src/digitalocean_genai_sdk/resources/moderations.py 
delete mode 100644 src/digitalocean_genai_sdk/resources/organization/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/invites.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/organization.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/projects.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/projects/users.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/usage.py delete mode 100644 src/digitalocean_genai_sdk/resources/organization/users.py delete mode 100644 src/digitalocean_genai_sdk/resources/realtime.py delete mode 100644 src/digitalocean_genai_sdk/resources/responses.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/messages.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/runs/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/runs/runs.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/runs/steps.py delete mode 100644 src/digitalocean_genai_sdk/resources/threads/threads.py delete mode 100644 src/digitalocean_genai_sdk/resources/uploads.py delete mode 100644 src/digitalocean_genai_sdk/resources/vector_stores/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py delete mode 100644 src/digitalocean_genai_sdk/resources/vector_stores/files.py delete mode 100644 src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_object.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_supported_models.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_code.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_code_param.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_file_search.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_function.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_tools_function_param.py delete mode 100644 src/digitalocean_genai_sdk/types/assistant_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py delete mode 100644 src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py delete mode 100644 src/digitalocean_genai_sdk/types/audio_generate_speech_params.py delete mode 100644 src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py delete mode 100644 src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/audio_translate_audio_params.py delete mode 100644 src/digitalocean_genai_sdk/types/audio_translate_audio_response.py delete mode 100644 src/digitalocean_genai_sdk/types/audit_log_actor_user.py delete mode 100644 src/digitalocean_genai_sdk/types/audit_log_event_type.py delete mode 100644 src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py delete mode 100644 src/digitalocean_genai_sdk/types/batch.py delete mode 100644 src/digitalocean_genai_sdk/types/batch_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/batch_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/batch_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/completion_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/message_tool_call.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_json_object.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_text.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_format_text_param.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/web_search_context_size.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/web_search_location.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/web_search_location_param.py delete mode 100644 src/digitalocean_genai_sdk/types/comparison_filter.py delete mode 100644 src/digitalocean_genai_sdk/types/comparison_filter_param.py delete mode 100644 src/digitalocean_genai_sdk/types/completion_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/completion_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/compound_filter.py delete mode 100644 src/digitalocean_genai_sdk/types/compound_filter_param.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call_output.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call_param.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py delete mode 100644 src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py delete mode 100644 src/digitalocean_genai_sdk/types/create_thread_request_param.py delete mode 100644 src/digitalocean_genai_sdk/types/file_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/file_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/file_list_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/file_retrieve_content_response.py delete mode 100644 src/digitalocean_genai_sdk/types/file_search_ranker.py delete mode 100644 src/digitalocean_genai_sdk/types/file_search_tool_call.py delete mode 100644 src/digitalocean_genai_sdk/types/file_search_tool_call_param.py delete mode 100644 src/digitalocean_genai_sdk/types/file_upload_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py delete mode 100644 src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/function_object.py delete mode 100644 src/digitalocean_genai_sdk/types/function_object_param.py delete mode 100644 src/digitalocean_genai_sdk/types/function_tool_call.py delete mode 100644 src/digitalocean_genai_sdk/types/function_tool_call_output.py delete mode 100644 src/digitalocean_genai_sdk/types/function_tool_call_output_param.py delete mode 100644 src/digitalocean_genai_sdk/types/function_tool_call_param.py delete mode 100644 src/digitalocean_genai_sdk/types/image_create_edit_params.py delete mode 100644 src/digitalocean_genai_sdk/types/image_create_generation_params.py delete mode 100644 src/digitalocean_genai_sdk/types/image_create_variation_params.py delete mode 100644 src/digitalocean_genai_sdk/types/images_response.py delete mode 100644 src/digitalocean_genai_sdk/types/includable.py delete mode 100644 src/digitalocean_genai_sdk/types/input_content.py delete mode 100644 src/digitalocean_genai_sdk/types/input_content_param.py delete mode 100644 src/digitalocean_genai_sdk/types/input_message.py delete mode 100644 src/digitalocean_genai_sdk/types/input_message_param.py delete mode 100644 src/digitalocean_genai_sdk/types/model_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/model_response_properties.py delete mode 100644 src/digitalocean_genai_sdk/types/moderation_classify_params.py delete mode 100644 src/digitalocean_genai_sdk/types/moderation_classify_response.py delete mode 100644 src/digitalocean_genai_sdk/types/openai_file.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/admin_api_key.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/invite.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/invite_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/invite_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/invite_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/invite_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/organization_user.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/project.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/project_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/project_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/project_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/project_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/api_key.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/project_user.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_completions_params.py delete mode 100644 
src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_images_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/user_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/user_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/user_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/organization/user_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization_get_costs_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py delete mode 100644 src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py delete mode 100644 src/digitalocean_genai_sdk/types/output_message.py delete mode 100644 src/digitalocean_genai_sdk/types/output_message_param.py delete mode 100644 src/digitalocean_genai_sdk/types/realtime_create_session_params.py delete mode 100644 src/digitalocean_genai_sdk/types/realtime_create_session_response.py delete mode 100644 src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py delete mode 100644 src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py delete mode 100644 src/digitalocean_genai_sdk/types/reasoning_effort.py delete mode 100644 src/digitalocean_genai_sdk/types/reasoning_item.py delete mode 100644 src/digitalocean_genai_sdk/types/reasoning_item_param.py delete mode 100644 src/digitalocean_genai_sdk/types/response.py delete mode 100644 src/digitalocean_genai_sdk/types/response_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/response_list_input_items_params.py delete mode 100644 src/digitalocean_genai_sdk/types/response_list_input_items_response.py delete mode 100644 src/digitalocean_genai_sdk/types/response_properties.py delete mode 100644 src/digitalocean_genai_sdk/types/response_retrieve_params.py delete mode 100644 src/digitalocean_genai_sdk/types/static_chunking_strategy.py delete mode 100644 src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py delete mode 100644 src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py delete mode 100644 src/digitalocean_genai_sdk/types/thread_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/thread_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/thread_object.py delete mode 100644 src/digitalocean_genai_sdk/types/thread_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/create_message_request_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py delete mode 100644 
src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_object.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/message_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_create_run_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_object.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/run_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/truncation_object.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/truncation_object_param.py delete mode 100644 src/digitalocean_genai_sdk/types/transcription_segment.py delete mode 100644 src/digitalocean_genai_sdk/types/upload.py delete mode 100644 src/digitalocean_genai_sdk/types/upload_add_part_params.py delete mode 100644 src/digitalocean_genai_sdk/types/upload_add_part_response.py delete mode 100644 src/digitalocean_genai_sdk/types/upload_complete_params.py delete mode 100644 src/digitalocean_genai_sdk/types/upload_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/usage_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_expiration_after.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_object.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_search_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_search_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_store_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py delete mode 100644 src/digitalocean_genai_sdk/types/voice_ids_shared.py delete mode 100644 src/digitalocean_genai_sdk/types/voice_ids_shared_param.py delete mode 100644 src/digitalocean_genai_sdk/types/web_search_tool_call.py delete mode 100644 src/digitalocean_genai_sdk/types/web_search_tool_call_param.py delete mode 100644 tests/api_resources/fine_tuning/__init__.py delete mode 100644 tests/api_resources/fine_tuning/checkpoints/__init__.py delete mode 100644 tests/api_resources/fine_tuning/checkpoints/test_permissions.py delete mode 100644 tests/api_resources/fine_tuning/jobs/__init__.py delete mode 100644 tests/api_resources/fine_tuning/jobs/test_checkpoints.py delete mode 100644 tests/api_resources/fine_tuning/jobs/test_events.py delete mode 100644 tests/api_resources/fine_tuning/test_jobs.py delete mode 100644 tests/api_resources/organization/__init__.py delete mode 100644 tests/api_resources/organization/projects/__init__.py delete mode 100644 tests/api_resources/organization/projects/test_api_keys.py delete mode 100644 tests/api_resources/organization/projects/test_rate_limits.py delete mode 100644 tests/api_resources/organization/projects/test_service_accounts.py delete mode 100644 tests/api_resources/organization/projects/test_users.py delete mode 100644 tests/api_resources/organization/test_admin_api_keys.py delete mode 100644 tests/api_resources/organization/test_invites.py delete mode 100644 tests/api_resources/organization/test_projects.py delete mode 100644 tests/api_resources/organization/test_usage.py delete mode 100644 tests/api_resources/organization/test_users.py delete mode 100644 tests/api_resources/test_assistants.py delete mode 100644 tests/api_resources/test_audio.py delete mode 100644 tests/api_resources/test_batches.py delete mode 100644 tests/api_resources/test_completions.py delete mode 100644 tests/api_resources/test_files.py delete mode 100644 tests/api_resources/test_images.py delete mode 100644 tests/api_resources/test_moderations.py delete mode 100644 tests/api_resources/test_organization.py delete mode 100644 tests/api_resources/test_realtime.py delete mode 100644 tests/api_resources/test_responses.py delete mode 100644 tests/api_resources/test_threads.py delete mode 100644 tests/api_resources/test_uploads.py delete mode 100644 tests/api_resources/test_vector_stores.py delete mode 100644 tests/api_resources/threads/__init__.py delete mode 100644 tests/api_resources/threads/runs/__init__.py delete mode 100644 tests/api_resources/threads/runs/test_steps.py delete mode 100644 tests/api_resources/threads/test_messages.py delete mode 100644 tests/api_resources/threads/test_runs.py delete mode 100644 tests/api_resources/vector_stores/__init__.py delete mode 100644 tests/api_resources/vector_stores/test_file_batches.py delete mode 100644 tests/api_resources/vector_stores/test_files.py diff --git a/.stats.yml b/.stats.yml index 8f90d8ba..6b91fe37 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ 
-configured_endpoints: 126 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml -openapi_spec_hash: 686329a97002025d118dc2367755c18d +configured_endpoints: 4 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml +openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a config_hash: 2da74b81015f4ef6cad3a0bcb9025834 diff --git a/README.md b/README.md index b556b556..bdaea964 100644 --- a/README.md +++ b/README.md @@ -33,8 +33,16 @@ client = DigitaloceanGenaiSDK( ), # This is the default and can be omitted ) -assistants = client.assistants.list() -print(assistants.first_id) +create_response = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", +) +print(create_response.id) ``` While you can provide an `api_key` keyword argument, @@ -59,8 +67,16 @@ client = AsyncDigitaloceanGenaiSDK( async def main() -> None: - assistants = await client.assistants.list() - print(assistants.first_id) + create_response = await client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + print(create_response.id) asyncio.run(main()) @@ -86,31 +102,19 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -assistant_object = client.assistants.create( - model="gpt-4o", - tool_resources={}, +create_response = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream_options={}, ) -print(assistant_object.tool_resources) +print(create_response.stream_options) ``` -## File uploads - -Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`. - -```python -from pathlib import Path -from digitalocean_genai_sdk import DigitaloceanGenaiSDK - -client = DigitaloceanGenaiSDK() - -client.audio.transcribe_audio( - file=Path("/path/to/file"), - model="gpt-4o-transcribe", -) -``` - -The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically. - ## Handling errors When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised. @@ -127,7 +131,15 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() try: - client.assistants.list() + client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) except digitalocean_genai_sdk.APIConnectionError as e: print("The server could not be reached") print(e.__cause__) # an underlying Exception, likely raised within httpx. 
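The error-handling hunk above demonstrates only `APIConnectionError`. As an illustrative sketch of the other half of the error hierarchy, here is a minimal example of catching HTTP status errors; it assumes that `APIStatusError` (which `_client.py` imports from `._exceptions` in a later hunk of this diff) exposes `status_code` and `response` attributes, as in other Stainless-generated Python SDKs:

```python
import digitalocean_genai_sdk
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

try:
    client.chat.completions.create(
        messages=[
            {
                "content": "string",
                "role": "system",
            }
        ],
        model="llama3-8b-instruct",
    )
except digitalocean_genai_sdk.APIStatusError as e:
    # Assumed attributes, mirroring other Stainless-generated SDKs.
    print(e.status_code)  # HTTP status returned by the API, e.g. 429
    print(e.response)  # the underlying httpx.Response
```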
@@ -170,7 +182,15 @@ client = DigitaloceanGenaiSDK( ) # Or, configure per-request: -client.with_options(max_retries=5).assistants.list() +client.with_options(max_retries=5).chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", +) ``` ### Timeouts @@ -193,7 +213,15 @@ client = DigitaloceanGenaiSDK( ) # Override per-request: -client.with_options(timeout=5.0).assistants.list() +client.with_options(timeout=5.0).chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", +) ``` On timeout, an `APITimeoutError` is thrown. @@ -234,11 +262,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -response = client.assistants.with_raw_response.list() +response = client.chat.completions.with_raw_response.create( + messages=[{ + "content": "string", + "role": "system", + }], + model="llama3-8b-instruct", +) print(response.headers.get('X-My-Header')) -assistant = response.parse() # get the object that `assistants.list()` would have returned -print(assistant.first_id) +completion = response.parse() # get the object that `chat.completions.create()` would have returned +print(completion.id) ``` These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. @@ -252,7 +286,15 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
```python -with client.assistants.with_streaming_response.list() as response: +with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", +) as response: print(response.headers.get("X-My-Header")) for line in response.iter_lines(): diff --git a/api.md b/api.md index daea5075..90a1a7d9 100644 --- a/api.md +++ b/api.md @@ -1,65 +1,3 @@ -# Assistants - -Types: - -```python -from digitalocean_genai_sdk.types import ( - AssistantObject, - AssistantSupportedModels, - AssistantToolsCode, - AssistantToolsFileSearch, - AssistantToolsFunction, - AssistantsAPIResponseFormatOption, - FileSearchRanker, - FunctionObject, - ReasoningEffort, - AssistantListResponse, - AssistantDeleteResponse, -) -``` - -Methods: - -- client.assistants.create(\*\*params) -> AssistantObject -- client.assistants.retrieve(assistant_id) -> AssistantObject -- client.assistants.update(assistant_id, \*\*params) -> AssistantObject -- client.assistants.list(\*\*params) -> AssistantListResponse -- client.assistants.delete(assistant_id) -> AssistantDeleteResponse - -# Audio - -Types: - -```python -from digitalocean_genai_sdk.types import ( - TranscriptionSegment, - VoiceIDsShared, - AudioTranscribeAudioResponse, - AudioTranslateAudioResponse, -) -``` - -Methods: - -- client.audio.generate_speech(\*\*params) -> BinaryAPIResponse -- client.audio.transcribe_audio(\*\*params) -> AudioTranscribeAudioResponse -- client.audio.translate_audio(\*\*params) -> AudioTranslateAudioResponse - -# Batches - -Types: - -```python -from digitalocean_genai_sdk.types import Batch, BatchListResponse -``` - -Methods: - -- client.batches.create(\*\*params) -> Batch -- client.batches.retrieve(batch_id) -> Batch -- client.batches.list(\*\*params) -> BatchListResponse -- client.batches.cancel(batch_id) -> Batch - # Chat ## Completions @@ -70,48 +8,25 @@ Types: from digitalocean_genai_sdk.types.chat import ( CreateModelProperties, CreateResponse, - MessageToolCall, - ModelIDsShared, RequestMessageContentPartText, - ResponseFormatJsonObject, - ResponseFormatJsonSchema, - ResponseFormatText, ResponseMessage, TokenLogprob, Usage, - WebSearchContextSize, - WebSearchLocation, - CompletionListResponse, - CompletionDeleteResponse, - CompletionListMessagesResponse, ) ``` Methods: - client.chat.completions.create(\*\*params) -> CreateResponse -- client.chat.completions.retrieve(completion_id) -> CreateResponse -- client.chat.completions.update(completion_id, \*\*params) -> CreateResponse -- client.chat.completions.list(\*\*params) -> CompletionListResponse -- client.chat.completions.delete(completion_id) -> CompletionDeleteResponse -- client.chat.completions.list_messages(completion_id, \*\*params) -> CompletionListMessagesResponse # Completions Types: ```python -from digitalocean_genai_sdk.types import ( - ChatCompletionStreamOptions, - StopConfiguration, - CompletionCreateResponse, -) +from digitalocean_genai_sdk.types import ChatCompletionStreamOptions, StopConfiguration ``` -Methods: - -- client.completions.create(\*\*params) -> CompletionCreateResponse - # Embeddings Types: @@ -124,504 +39,23 @@ Methods: - client.embeddings.create(\*\*params) -> EmbeddingCreateResponse -# Files - -Types: - -```python -from digitalocean_genai_sdk.types import ( - OpenAIFile, - FileListResponse, - FileDeleteResponse, - FileRetrieveContentResponse, -) -``` - -Methods: - -- client.files.retrieve(file_id) -> OpenAIFile -- client.files.list(\*\*params) -> FileListResponse -- 
client.files.delete(file_id) -> FileDeleteResponse -- client.files.retrieve_content(file_id) -> str -- client.files.upload(\*\*params) -> OpenAIFile - -# FineTuning - -## Checkpoints - -### Permissions - -Types: - -```python -from digitalocean_genai_sdk.types.fine_tuning.checkpoints import ( - ListFineTuningCheckpointPermission, - PermissionDeleteResponse, -) -``` - -Methods: - -- client.fine_tuning.checkpoints.permissions.create(permission_id, \*\*params) -> ListFineTuningCheckpointPermission -- client.fine_tuning.checkpoints.permissions.retrieve(permission_id, \*\*params) -> ListFineTuningCheckpointPermission -- client.fine_tuning.checkpoints.permissions.delete(permission_id) -> PermissionDeleteResponse - -## Jobs - -Types: - -```python -from digitalocean_genai_sdk.types.fine_tuning import FineTuneMethod, FineTuningJob, JobListResponse -``` - -Methods: - -- client.fine_tuning.jobs.create(\*\*params) -> FineTuningJob -- client.fine_tuning.jobs.retrieve(fine_tuning_job_id) -> FineTuningJob -- client.fine_tuning.jobs.list(\*\*params) -> JobListResponse -- client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob - -### Checkpoints - -Types: - -```python -from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse -``` - -Methods: - -- client.fine_tuning.jobs.checkpoints.retrieve(fine_tuning_job_id, \*\*params) -> CheckpointRetrieveResponse - -### Events - -Types: - -```python -from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse -``` - -Methods: - -- client.fine_tuning.jobs.events.retrieve(fine_tuning_job_id, \*\*params) -> EventRetrieveResponse - -# Images - -Types: - -```python -from digitalocean_genai_sdk.types import ImagesResponse -``` - -Methods: - -- client.images.create_edit(\*\*params) -> ImagesResponse -- client.images.create_generation(\*\*params) -> ImagesResponse -- client.images.create_variation(\*\*params) -> ImagesResponse - # Models Types: ```python -from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse +from digitalocean_genai_sdk.types import Model, ModelListResponse ``` Methods: - client.models.retrieve(model) -> Model - client.models.list() -> ModelListResponse -- client.models.delete(model) -> ModelDeleteResponse - -# Moderations - -Types: - -```python -from digitalocean_genai_sdk.types import ModerationClassifyResponse -``` - -Methods: - -- client.moderations.classify(\*\*params) -> ModerationClassifyResponse - -# Organization - -Types: - -```python -from digitalocean_genai_sdk.types import ( - AuditLogActorUser, - AuditLogEventType, - UsageResponse, - OrganizationListAuditLogsResponse, -) -``` - -Methods: - -- client.organization.get_costs(\*\*params) -> UsageResponse -- client.organization.list_audit_logs(\*\*params) -> OrganizationListAuditLogsResponse - -## AdminAPIKeys - -Types: - -```python -from digitalocean_genai_sdk.types.organization import ( - AdminAPIKey, - AdminAPIKeyListResponse, - AdminAPIKeyDeleteResponse, -) -``` - -Methods: - -- client.organization.admin_api_keys.create(\*\*params) -> AdminAPIKey -- client.organization.admin_api_keys.retrieve(key_id) -> AdminAPIKey -- client.organization.admin_api_keys.list(\*\*params) -> AdminAPIKeyListResponse -- client.organization.admin_api_keys.delete(key_id) -> AdminAPIKeyDeleteResponse - -## Invites - -Types: - -```python -from digitalocean_genai_sdk.types.organization import ( - Invite, - InviteListResponse, - InviteDeleteResponse, -) -``` - -Methods: - -- client.organization.invites.create(\*\*params) -> Invite -- 
client.organization.invites.retrieve(invite_id) -> Invite -- client.organization.invites.list(\*\*params) -> InviteListResponse -- client.organization.invites.delete(invite_id) -> InviteDeleteResponse - -## Projects - -Types: - -```python -from digitalocean_genai_sdk.types.organization import Project, ProjectListResponse -``` - -Methods: - -- client.organization.projects.create(\*\*params) -> Project -- client.organization.projects.retrieve(project_id) -> Project -- client.organization.projects.update(project_id, \*\*params) -> Project -- client.organization.projects.list(\*\*params) -> ProjectListResponse -- client.organization.projects.archive(project_id) -> Project - -### APIKeys - -Types: - -```python -from digitalocean_genai_sdk.types.organization.projects import ( - APIKey, - APIKeyListResponse, - APIKeyDeleteResponse, -) -``` - -Methods: - -- client.organization.projects.api_keys.retrieve(key_id, \*, project_id) -> APIKey -- client.organization.projects.api_keys.list(project_id, \*\*params) -> APIKeyListResponse -- client.organization.projects.api_keys.delete(key_id, \*, project_id) -> APIKeyDeleteResponse - -### RateLimits - -Types: - -```python -from digitalocean_genai_sdk.types.organization.projects import RateLimit, RateLimitListResponse -``` - -Methods: - -- client.organization.projects.rate_limits.update(rate_limit_id, \*, project_id, \*\*params) -> RateLimit -- client.organization.projects.rate_limits.list(project_id, \*\*params) -> RateLimitListResponse - -### ServiceAccounts - -Types: - -```python -from digitalocean_genai_sdk.types.organization.projects import ( - ServiceAccount, - ServiceAccountCreateResponse, - ServiceAccountListResponse, - ServiceAccountDeleteResponse, -) -``` - -Methods: - -- client.organization.projects.service_accounts.create(project_id, \*\*params) -> ServiceAccountCreateResponse -- client.organization.projects.service_accounts.retrieve(service_account_id, \*, project_id) -> ServiceAccount -- client.organization.projects.service_accounts.list(project_id, \*\*params) -> ServiceAccountListResponse -- client.organization.projects.service_accounts.delete(service_account_id, \*, project_id) -> ServiceAccountDeleteResponse - -### Users - -Types: - -```python -from digitalocean_genai_sdk.types.organization.projects import ( - ProjectUser, - UserListResponse, - UserDeleteResponse, -) -``` - -Methods: - -- client.organization.projects.users.retrieve(user_id, \*, project_id) -> ProjectUser -- client.organization.projects.users.update(user_id, \*, project_id, \*\*params) -> ProjectUser -- client.organization.projects.users.list(project_id, \*\*params) -> UserListResponse -- client.organization.projects.users.delete(user_id, \*, project_id) -> UserDeleteResponse -- client.organization.projects.users.add(project_id, \*\*params) -> ProjectUser - -## Usage - -Methods: - -- client.organization.usage.audio_speeches(\*\*params) -> UsageResponse -- client.organization.usage.audio_transcriptions(\*\*params) -> UsageResponse -- client.organization.usage.code_interpreter_sessions(\*\*params) -> UsageResponse -- client.organization.usage.completions(\*\*params) -> UsageResponse -- client.organization.usage.embeddings(\*\*params) -> UsageResponse -- client.organization.usage.images(\*\*params) -> UsageResponse -- client.organization.usage.moderations(\*\*params) -> UsageResponse -- client.organization.usage.vector_stores(\*\*params) -> UsageResponse - -## Users - -Types: - -```python -from digitalocean_genai_sdk.types.organization import ( - OrganizationUser, - 
UserListResponse, - UserDeleteResponse, -) -``` - -Methods: - -- client.organization.users.retrieve(user_id) -> OrganizationUser -- client.organization.users.update(user_id, \*\*params) -> OrganizationUser -- client.organization.users.list(\*\*params) -> UserListResponse -- client.organization.users.delete(user_id) -> UserDeleteResponse - -# Realtime - -Types: - -```python -from digitalocean_genai_sdk.types import ( - RealtimeCreateSessionResponse, - RealtimeCreateTranscriptionSessionResponse, -) -``` - -Methods: - -- client.realtime.create_session(\*\*params) -> RealtimeCreateSessionResponse -- client.realtime.create_transcription_session(\*\*params) -> RealtimeCreateTranscriptionSessionResponse # Responses Types: ```python -from digitalocean_genai_sdk.types import ( - ComputerToolCall, - ComputerToolCallOutput, - ComputerToolCallSafetyCheck, - FileSearchToolCall, - FunctionToolCall, - FunctionToolCallOutput, - Includable, - InputContent, - InputMessage, - ModelResponseProperties, - OutputMessage, - ReasoningItem, - Response, - ResponseProperties, - WebSearchToolCall, - ResponseListInputItemsResponse, -) +from digitalocean_genai_sdk.types import ModelResponseProperties ``` - -Methods: - -- client.responses.create(\*\*params) -> Response -- client.responses.retrieve(response_id, \*\*params) -> Response -- client.responses.delete(response_id) -> None -- client.responses.list_input_items(response_id, \*\*params) -> ResponseListInputItemsResponse - -# Threads - -Types: - -```python -from digitalocean_genai_sdk.types import CreateThreadRequest, ThreadObject, ThreadDeleteResponse -``` - -Methods: - -- client.threads.create(\*\*params) -> ThreadObject -- client.threads.retrieve(thread_id) -> ThreadObject -- client.threads.update(thread_id, \*\*params) -> ThreadObject -- client.threads.delete(thread_id) -> ThreadDeleteResponse - -## Runs - -Types: - -```python -from digitalocean_genai_sdk.types.threads import ( - AssistantsAPIToolChoiceOption, - RunObject, - TruncationObject, - RunListResponse, -) -``` - -Methods: - -- client.threads.runs.create(\*\*params) -> RunObject -- client.threads.runs.retrieve(run_id, \*, thread_id) -> RunObject -- client.threads.runs.update(run_id, \*, thread_id, \*\*params) -> RunObject -- client.threads.runs.list(thread_id, \*\*params) -> RunListResponse -- client.threads.runs.cancel(run_id, \*, thread_id) -> RunObject -- client.threads.runs.create_run(thread_id, \*\*params) -> RunObject -- client.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> RunObject - -### Steps - -Types: - -```python -from digitalocean_genai_sdk.types.threads.runs import RunStepObject, StepListResponse -``` - -Methods: - -- client.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStepObject -- client.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> StepListResponse - -## Messages - -Types: - -```python -from digitalocean_genai_sdk.types.threads import ( - AssistantToolsFileSearchTypeOnly, - CreateMessageRequest, - MessageContentImageFileObject, - MessageContentImageURLObject, - MessageObject, - MessageListResponse, - MessageDeleteResponse, -) -``` - -Methods: - -- client.threads.messages.create(thread_id, \*\*params) -> MessageObject -- client.threads.messages.retrieve(message_id, \*, thread_id) -> MessageObject -- client.threads.messages.update(message_id, \*, thread_id, \*\*params) -> MessageObject -- client.threads.messages.list(thread_id, \*\*params) -> MessageListResponse -- client.threads.messages.delete(message_id, \*, 
thread_id) -> MessageDeleteResponse - -# Uploads - -Types: - -```python -from digitalocean_genai_sdk.types import Upload, UploadAddPartResponse -``` - -Methods: - -- client.uploads.create(\*\*params) -> Upload -- client.uploads.add_part(upload_id, \*\*params) -> UploadAddPartResponse -- client.uploads.cancel(upload_id) -> Upload -- client.uploads.complete(upload_id, \*\*params) -> Upload - -# VectorStores - -Types: - -```python -from digitalocean_genai_sdk.types import ( - AutoChunkingStrategyRequestParam, - ComparisonFilter, - CompoundFilter, - StaticChunkingStrategy, - StaticChunkingStrategyRequestParam, - VectorStoreExpirationAfter, - VectorStoreObject, - VectorStoreListResponse, - VectorStoreDeleteResponse, - VectorStoreSearchResponse, -) -``` - -Methods: - -- client.vector_stores.create(\*\*params) -> VectorStoreObject -- client.vector_stores.retrieve(vector_store_id) -> VectorStoreObject -- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStoreObject -- client.vector_stores.list(\*\*params) -> VectorStoreListResponse -- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse -- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse - -## FileBatches - -Types: - -```python -from digitalocean_genai_sdk.types.vector_stores import ( - ChunkingStrategyRequestParam, - ListVectorStoreFilesResponse, - VectorStoreFileBatchObject, -) -``` - -Methods: - -- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatchObject -- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject -- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatchObject -- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> ListVectorStoreFilesResponse - -## Files - -Types: - -```python -from digitalocean_genai_sdk.types.vector_stores import ( - VectorStoreFileObject, - FileDeleteResponse, - FileRetrieveContentResponse, -) -``` - -Methods: - -- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFileObject -- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFileObject -- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFileObject -- client.vector_stores.files.list(vector_store_id, \*\*params) -> ListVectorStoreFilesResponse -- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse -- client.vector_stores.files.retrieve_content(file_id, \*, vector_store_id) -> FileRetrieveContentResponse diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py index 99580b5e..8a0fb675 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/digitalocean_genai_sdk/_client.py @@ -21,20 +21,7 @@ ) from ._utils import is_given, get_async_library from ._version import __version__ -from .resources import ( - audio, - files, - images, - models, - batches, - uploads, - realtime, - responses, - assistants, - embeddings, - completions, - moderations, -) +from .resources import models, embeddings from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError from ._base_client import ( @@ -43,10 +30,6 @@ AsyncAPIClient, ) from .resources.chat import chat -from .resources.threads import threads -from .resources.fine_tuning import fine_tuning -from .resources.organization import organization 
-from .resources.vector_stores import vector_stores __all__ = [ "Timeout", @@ -61,23 +44,9 @@ class DigitaloceanGenaiSDK(SyncAPIClient): - assistants: assistants.AssistantsResource - audio: audio.AudioResource - batches: batches.BatchesResource chat: chat.ChatResource - completions: completions.CompletionsResource embeddings: embeddings.EmbeddingsResource - files: files.FilesResource - fine_tuning: fine_tuning.FineTuningResource - images: images.ImagesResource models: models.ModelsResource - moderations: moderations.ModerationsResource - organization: organization.OrganizationResource - realtime: realtime.RealtimeResource - responses: responses.ResponsesResource - threads: threads.ThreadsResource - uploads: uploads.UploadsResource - vector_stores: vector_stores.VectorStoresResource with_raw_response: DigitaloceanGenaiSDKWithRawResponse with_streaming_response: DigitaloceanGenaiSDKWithStreamedResponse @@ -135,23 +104,9 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.assistants = assistants.AssistantsResource(self) - self.audio = audio.AudioResource(self) - self.batches = batches.BatchesResource(self) self.chat = chat.ChatResource(self) - self.completions = completions.CompletionsResource(self) self.embeddings = embeddings.EmbeddingsResource(self) - self.files = files.FilesResource(self) - self.fine_tuning = fine_tuning.FineTuningResource(self) - self.images = images.ImagesResource(self) self.models = models.ModelsResource(self) - self.moderations = moderations.ModerationsResource(self) - self.organization = organization.OrganizationResource(self) - self.realtime = realtime.RealtimeResource(self) - self.responses = responses.ResponsesResource(self) - self.threads = threads.ThreadsResource(self) - self.uploads = uploads.UploadsResource(self) - self.vector_stores = vector_stores.VectorStoresResource(self) self.with_raw_response = DigitaloceanGenaiSDKWithRawResponse(self) self.with_streaming_response = DigitaloceanGenaiSDKWithStreamedResponse(self) @@ -261,23 +216,9 @@ def _make_status_error( class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): - assistants: assistants.AsyncAssistantsResource - audio: audio.AsyncAudioResource - batches: batches.AsyncBatchesResource chat: chat.AsyncChatResource - completions: completions.AsyncCompletionsResource embeddings: embeddings.AsyncEmbeddingsResource - files: files.AsyncFilesResource - fine_tuning: fine_tuning.AsyncFineTuningResource - images: images.AsyncImagesResource models: models.AsyncModelsResource - moderations: moderations.AsyncModerationsResource - organization: organization.AsyncOrganizationResource - realtime: realtime.AsyncRealtimeResource - responses: responses.AsyncResponsesResource - threads: threads.AsyncThreadsResource - uploads: uploads.AsyncUploadsResource - vector_stores: vector_stores.AsyncVectorStoresResource with_raw_response: AsyncDigitaloceanGenaiSDKWithRawResponse with_streaming_response: AsyncDigitaloceanGenaiSDKWithStreamedResponse @@ -335,23 +276,9 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.assistants = assistants.AsyncAssistantsResource(self) - self.audio = audio.AsyncAudioResource(self) - self.batches = batches.AsyncBatchesResource(self) self.chat = chat.AsyncChatResource(self) - self.completions = completions.AsyncCompletionsResource(self) self.embeddings = embeddings.AsyncEmbeddingsResource(self) - self.files = files.AsyncFilesResource(self) - self.fine_tuning = fine_tuning.AsyncFineTuningResource(self) - self.images = images.AsyncImagesResource(self) 
self.models = models.AsyncModelsResource(self) - self.moderations = moderations.AsyncModerationsResource(self) - self.organization = organization.AsyncOrganizationResource(self) - self.realtime = realtime.AsyncRealtimeResource(self) - self.responses = responses.AsyncResponsesResource(self) - self.threads = threads.AsyncThreadsResource(self) - self.uploads = uploads.AsyncUploadsResource(self) - self.vector_stores = vector_stores.AsyncVectorStoresResource(self) self.with_raw_response = AsyncDigitaloceanGenaiSDKWithRawResponse(self) self.with_streaming_response = AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) @@ -462,86 +389,30 @@ def _make_status_error( class DigitaloceanGenaiSDKWithRawResponse: def __init__(self, client: DigitaloceanGenaiSDK) -> None: - self.assistants = assistants.AssistantsResourceWithRawResponse(client.assistants) - self.audio = audio.AudioResourceWithRawResponse(client.audio) - self.batches = batches.BatchesResourceWithRawResponse(client.batches) self.chat = chat.ChatResourceWithRawResponse(client.chat) - self.completions = completions.CompletionsResourceWithRawResponse(client.completions) self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings) - self.files = files.FilesResourceWithRawResponse(client.files) - self.fine_tuning = fine_tuning.FineTuningResourceWithRawResponse(client.fine_tuning) - self.images = images.ImagesResourceWithRawResponse(client.images) self.models = models.ModelsResourceWithRawResponse(client.models) - self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations) - self.organization = organization.OrganizationResourceWithRawResponse(client.organization) - self.realtime = realtime.RealtimeResourceWithRawResponse(client.realtime) - self.responses = responses.ResponsesResourceWithRawResponse(client.responses) - self.threads = threads.ThreadsResourceWithRawResponse(client.threads) - self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads) - self.vector_stores = vector_stores.VectorStoresResourceWithRawResponse(client.vector_stores) class AsyncDigitaloceanGenaiSDKWithRawResponse: def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: - self.assistants = assistants.AsyncAssistantsResourceWithRawResponse(client.assistants) - self.audio = audio.AsyncAudioResourceWithRawResponse(client.audio) - self.batches = batches.AsyncBatchesResourceWithRawResponse(client.batches) self.chat = chat.AsyncChatResourceWithRawResponse(client.chat) - self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions) self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings) - self.files = files.AsyncFilesResourceWithRawResponse(client.files) - self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithRawResponse(client.fine_tuning) - self.images = images.AsyncImagesResourceWithRawResponse(client.images) self.models = models.AsyncModelsResourceWithRawResponse(client.models) - self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations) - self.organization = organization.AsyncOrganizationResourceWithRawResponse(client.organization) - self.realtime = realtime.AsyncRealtimeResourceWithRawResponse(client.realtime) - self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses) - self.threads = threads.AsyncThreadsResourceWithRawResponse(client.threads) - self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads) - self.vector_stores = 
vector_stores.AsyncVectorStoresResourceWithRawResponse(client.vector_stores) class DigitaloceanGenaiSDKWithStreamedResponse: def __init__(self, client: DigitaloceanGenaiSDK) -> None: - self.assistants = assistants.AssistantsResourceWithStreamingResponse(client.assistants) - self.audio = audio.AudioResourceWithStreamingResponse(client.audio) - self.batches = batches.BatchesResourceWithStreamingResponse(client.batches) self.chat = chat.ChatResourceWithStreamingResponse(client.chat) - self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions) self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings) - self.files = files.FilesResourceWithStreamingResponse(client.files) - self.fine_tuning = fine_tuning.FineTuningResourceWithStreamingResponse(client.fine_tuning) - self.images = images.ImagesResourceWithStreamingResponse(client.images) self.models = models.ModelsResourceWithStreamingResponse(client.models) - self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations) - self.organization = organization.OrganizationResourceWithStreamingResponse(client.organization) - self.realtime = realtime.RealtimeResourceWithStreamingResponse(client.realtime) - self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses) - self.threads = threads.ThreadsResourceWithStreamingResponse(client.threads) - self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads) - self.vector_stores = vector_stores.VectorStoresResourceWithStreamingResponse(client.vector_stores) class AsyncDigitaloceanGenaiSDKWithStreamedResponse: def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: - self.assistants = assistants.AsyncAssistantsResourceWithStreamingResponse(client.assistants) - self.audio = audio.AsyncAudioResourceWithStreamingResponse(client.audio) - self.batches = batches.AsyncBatchesResourceWithStreamingResponse(client.batches) self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat) - self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions) self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings) - self.files = files.AsyncFilesResourceWithStreamingResponse(client.files) - self.fine_tuning = fine_tuning.AsyncFineTuningResourceWithStreamingResponse(client.fine_tuning) - self.images = images.AsyncImagesResourceWithStreamingResponse(client.images) self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) - self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations) - self.organization = organization.AsyncOrganizationResourceWithStreamingResponse(client.organization) - self.realtime = realtime.AsyncRealtimeResourceWithStreamingResponse(client.realtime) - self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses) - self.threads = threads.AsyncThreadsResourceWithStreamingResponse(client.threads) - self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads) - self.vector_stores = vector_stores.AsyncVectorStoresResourceWithStreamingResponse(client.vector_stores) Client = DigitaloceanGenaiSDK diff --git a/src/digitalocean_genai_sdk/_files.py b/src/digitalocean_genai_sdk/_files.py index 02512281..715cc207 100644 --- a/src/digitalocean_genai_sdk/_files.py +++ b/src/digitalocean_genai_sdk/_files.py @@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None: if not 
is_file_content(obj): prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" raise RuntimeError( - f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/digitalocean/genai-python/tree/main#file-uploads" + f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead." ) from None diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py index 237b0ca7..c9177434 100644 --- a/src/digitalocean_genai_sdk/resources/__init__.py +++ b/src/digitalocean_genai_sdk/resources/__init__.py @@ -8,30 +8,6 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) -from .audio import ( - AudioResource, - AsyncAudioResource, - AudioResourceWithRawResponse, - AsyncAudioResourceWithRawResponse, - AudioResourceWithStreamingResponse, - AsyncAudioResourceWithStreamingResponse, -) -from .files import ( - FilesResource, - AsyncFilesResource, - FilesResourceWithRawResponse, - AsyncFilesResourceWithRawResponse, - FilesResourceWithStreamingResponse, - AsyncFilesResourceWithStreamingResponse, -) -from .images import ( - ImagesResource, - AsyncImagesResource, - ImagesResourceWithRawResponse, - AsyncImagesResourceWithRawResponse, - ImagesResourceWithStreamingResponse, - AsyncImagesResourceWithStreamingResponse, -) from .models import ( ModelsResource, AsyncModelsResource, @@ -40,54 +16,6 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) -from .batches import ( - BatchesResource, - AsyncBatchesResource, - BatchesResourceWithRawResponse, - AsyncBatchesResourceWithRawResponse, - BatchesResourceWithStreamingResponse, - AsyncBatchesResourceWithStreamingResponse, -) -from .threads import ( - ThreadsResource, - AsyncThreadsResource, - ThreadsResourceWithRawResponse, - AsyncThreadsResourceWithRawResponse, - ThreadsResourceWithStreamingResponse, - AsyncThreadsResourceWithStreamingResponse, -) -from .uploads import ( - UploadsResource, - AsyncUploadsResource, - UploadsResourceWithRawResponse, - AsyncUploadsResourceWithRawResponse, - UploadsResourceWithStreamingResponse, - AsyncUploadsResourceWithStreamingResponse, -) -from .realtime import ( - RealtimeResource, - AsyncRealtimeResource, - RealtimeResourceWithRawResponse, - AsyncRealtimeResourceWithRawResponse, - RealtimeResourceWithStreamingResponse, - AsyncRealtimeResourceWithStreamingResponse, -) -from .responses import ( - ResponsesResource, - AsyncResponsesResource, - ResponsesResourceWithRawResponse, - AsyncResponsesResourceWithRawResponse, - ResponsesResourceWithStreamingResponse, - AsyncResponsesResourceWithStreamingResponse, -) -from .assistants import ( - AssistantsResource, - AsyncAssistantsResource, - AssistantsResourceWithRawResponse, - AsyncAssistantsResourceWithRawResponse, - AssistantsResourceWithStreamingResponse, - AsyncAssistantsResourceWithStreamingResponse, -) from .embeddings import ( EmbeddingsResource, AsyncEmbeddingsResource, @@ -96,148 +24,24 @@ EmbeddingsResourceWithStreamingResponse, AsyncEmbeddingsResourceWithStreamingResponse, ) -from .completions import ( - CompletionsResource, - AsyncCompletionsResource, - CompletionsResourceWithRawResponse, - AsyncCompletionsResourceWithRawResponse, - CompletionsResourceWithStreamingResponse, - AsyncCompletionsResourceWithStreamingResponse, -) -from .fine_tuning import ( - FineTuningResource, - AsyncFineTuningResource, - FineTuningResourceWithRawResponse, - 
AsyncFineTuningResourceWithRawResponse, - FineTuningResourceWithStreamingResponse, - AsyncFineTuningResourceWithStreamingResponse, -) -from .moderations import ( - ModerationsResource, - AsyncModerationsResource, - ModerationsResourceWithRawResponse, - AsyncModerationsResourceWithRawResponse, - ModerationsResourceWithStreamingResponse, - AsyncModerationsResourceWithStreamingResponse, -) -from .organization import ( - OrganizationResource, - AsyncOrganizationResource, - OrganizationResourceWithRawResponse, - AsyncOrganizationResourceWithRawResponse, - OrganizationResourceWithStreamingResponse, - AsyncOrganizationResourceWithStreamingResponse, -) -from .vector_stores import ( - VectorStoresResource, - AsyncVectorStoresResource, - VectorStoresResourceWithRawResponse, - AsyncVectorStoresResourceWithRawResponse, - VectorStoresResourceWithStreamingResponse, - AsyncVectorStoresResourceWithStreamingResponse, -) __all__ = [ - "AssistantsResource", - "AsyncAssistantsResource", - "AssistantsResourceWithRawResponse", - "AsyncAssistantsResourceWithRawResponse", - "AssistantsResourceWithStreamingResponse", - "AsyncAssistantsResourceWithStreamingResponse", - "AudioResource", - "AsyncAudioResource", - "AudioResourceWithRawResponse", - "AsyncAudioResourceWithRawResponse", - "AudioResourceWithStreamingResponse", - "AsyncAudioResourceWithStreamingResponse", - "BatchesResource", - "AsyncBatchesResource", - "BatchesResourceWithRawResponse", - "AsyncBatchesResourceWithRawResponse", - "BatchesResourceWithStreamingResponse", - "AsyncBatchesResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", - "CompletionsResource", - "AsyncCompletionsResource", - "CompletionsResourceWithRawResponse", - "AsyncCompletionsResourceWithRawResponse", - "CompletionsResourceWithStreamingResponse", - "AsyncCompletionsResourceWithStreamingResponse", "EmbeddingsResource", "AsyncEmbeddingsResource", "EmbeddingsResourceWithRawResponse", "AsyncEmbeddingsResourceWithRawResponse", "EmbeddingsResourceWithStreamingResponse", "AsyncEmbeddingsResourceWithStreamingResponse", - "FilesResource", - "AsyncFilesResource", - "FilesResourceWithRawResponse", - "AsyncFilesResourceWithRawResponse", - "FilesResourceWithStreamingResponse", - "AsyncFilesResourceWithStreamingResponse", - "FineTuningResource", - "AsyncFineTuningResource", - "FineTuningResourceWithRawResponse", - "AsyncFineTuningResourceWithRawResponse", - "FineTuningResourceWithStreamingResponse", - "AsyncFineTuningResourceWithStreamingResponse", - "ImagesResource", - "AsyncImagesResource", - "ImagesResourceWithRawResponse", - "AsyncImagesResourceWithRawResponse", - "ImagesResourceWithStreamingResponse", - "AsyncImagesResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", - "ModerationsResource", - "AsyncModerationsResource", - "ModerationsResourceWithRawResponse", - "AsyncModerationsResourceWithRawResponse", - "ModerationsResourceWithStreamingResponse", - "AsyncModerationsResourceWithStreamingResponse", - "OrganizationResource", - "AsyncOrganizationResource", - "OrganizationResourceWithRawResponse", - "AsyncOrganizationResourceWithRawResponse", - "OrganizationResourceWithStreamingResponse", - "AsyncOrganizationResourceWithStreamingResponse", - "RealtimeResource", - 
"AsyncRealtimeResource", - "RealtimeResourceWithRawResponse", - "AsyncRealtimeResourceWithRawResponse", - "RealtimeResourceWithStreamingResponse", - "AsyncRealtimeResourceWithStreamingResponse", - "ResponsesResource", - "AsyncResponsesResource", - "ResponsesResourceWithRawResponse", - "AsyncResponsesResourceWithRawResponse", - "ResponsesResourceWithStreamingResponse", - "AsyncResponsesResourceWithStreamingResponse", - "ThreadsResource", - "AsyncThreadsResource", - "ThreadsResourceWithRawResponse", - "AsyncThreadsResourceWithRawResponse", - "ThreadsResourceWithStreamingResponse", - "AsyncThreadsResourceWithStreamingResponse", - "UploadsResource", - "AsyncUploadsResource", - "UploadsResourceWithRawResponse", - "AsyncUploadsResourceWithRawResponse", - "UploadsResourceWithStreamingResponse", - "AsyncUploadsResourceWithStreamingResponse", - "VectorStoresResource", - "AsyncVectorStoresResource", - "VectorStoresResourceWithRawResponse", - "AsyncVectorStoresResourceWithRawResponse", - "VectorStoresResourceWithStreamingResponse", - "AsyncVectorStoresResourceWithStreamingResponse", ] diff --git a/src/digitalocean_genai_sdk/resources/assistants.py b/src/digitalocean_genai_sdk/resources/assistants.py deleted file mode 100644 index 6e31fe9e..00000000 --- a/src/digitalocean_genai_sdk/resources/assistants.py +++ /dev/null @@ -1,910 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from ..types import ( - ReasoningEffort, - assistant_list_params, - assistant_create_params, - assistant_update_params, -) -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.assistant_object import AssistantObject -from ..types.reasoning_effort import ReasoningEffort -from ..types.assistant_list_response import AssistantListResponse -from ..types.assistant_delete_response import AssistantDeleteResponse -from ..types.assistant_supported_models import AssistantSupportedModels -from ..types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam - -__all__ = ["AssistantsResource", "AsyncAssistantsResource"] - - -class AssistantsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> AssistantsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AssistantsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AssistantsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AssistantsResourceWithStreamingResponse(self) - - def create( - self, - *, - model: Union[str, AssistantSupportedModels], - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """ - Create an assistant with a model and instructions. - - Args: - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - description: The description of the assistant. The maximum length is 512 characters. - - instructions: The system instructions that the assistant uses. The maximum length is 256,000 - characters. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the assistant. The maximum length is 256 characters. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. 
Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per - assistant. Tools can be of types `code_interpreter`, `file_search`, or - `function`. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/assistants", - body=maybe_transform( - { - "model": model, - "description": description, - "instructions": instructions, - "metadata": metadata, - "name": name, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - }, - assistant_create_params.AssistantCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - def retrieve( - self, - assistant_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """ - Retrieves an assistant. 
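# --- [editor's aside, not part of the patch] --------------------------------
# Why every optional kwarg in `create` above defaults to the NOT_GIVEN sentinel
# rather than None: `maybe_transform` drops sentinel entries, so an omitted
# argument never reaches the wire, while an explicit None is still serialized
# as JSON null. A self-contained toy version of that filtering step:

class _NotGiven:
    def __repr__(self) -> str:
        return "NOT_GIVEN"

NOT_GIVEN = _NotGiven()

def drop_not_given(params: dict) -> dict:
    # Keep explicit None (sent as null); drop only the sentinel.
    return {k: v for k, v in params.items() if not isinstance(v, _NotGiven)}

result = drop_not_given({"model": "m", "name": NOT_GIVEN, "temperature": None})
assert result == {"model": "m", "temperature": None}
# -----------------------------------------------------------------------------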
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return self._get( - f"/assistants/{assistant_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - def update( - self, - assistant_id: str, - *, - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """Modifies an assistant. - - Args: - description: The description of the assistant. - - The maximum length is 512 characters. - - instructions: The system instructions that the assistant uses. The maximum length is 256,000 - characters. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - name: The name of the assistant. The maximum length is 256 characters. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). 
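# --- [editor's aside, not part of the patch] --------------------------------
# Concretely, the two `response_format` settings this docstring describes look
# like the payloads below. The schema itself is a made-up example:

structured_outputs = {
    "type": "json_schema",
    "json_schema": {
        "name": "city_answer",  # hypothetical schema name
        "schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# JSON mode. Per the warning below: also instruct the model to produce JSON in
# a system or user message, or it may stream whitespace to the token limit.
json_mode = {"type": "json_object"}
# -----------------------------------------------------------------------------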
- - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per - assistant. Tools can be of types `code_interpreter`, `file_search`, or - `function`. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return self._post( - f"/assistants/{assistant_id}", - body=maybe_transform( - { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - }, - assistant_update_params.AssistantUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantListResponse: - """Returns a list of assistants. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. 
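# --- [editor's aside, not part of the patch] --------------------------------
# Walking the cursor pagination described above: feed the id of the last object
# you received back in as `after` until the server reports no more pages. The
# response attributes (`data`, `has_more`) are assumptions about the
# list-response shape, not guaranteed by this patch:

def iter_all(list_fn, page_size: int = 100):
    """Yield every object from a cursor-paginated list endpoint."""
    after = None
    while True:
        page = list_fn(limit=page_size, **({"after": after} if after else {}))
        yield from page.data
        if not getattr(page, "has_more", False):
            break
        after = page.data[-1].id  # cursor = last object on this page

# usage sketch: for a in iter_all(client.assistants.list): print(a.id)
# -----------------------------------------------------------------------------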
- - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/assistants", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - assistant_list_params.AssistantListParams, - ), - ), - cast_to=AssistantListResponse, - ) - - def delete( - self, - assistant_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantDeleteResponse: - """ - Delete an assistant. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return self._delete( - f"/assistants/{assistant_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantDeleteResponse, - ) - - -class AsyncAssistantsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAssistantsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAssistantsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAssistantsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
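# --- [editor's aside, not part of the patch] --------------------------------
# The four per-request escape hatches repeated in every docstring above
# (extra_headers / extra_query / extra_body / timeout) compose like this;
# `timeout` accepts plain seconds or a full httpx.Timeout. Shown on
# `models.list`, which this patch keeps; the client kwarg is an assumption:
import httpx

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK(api_key="...")  # assumed auth kwarg
models = client.models.list(
    extra_headers={"X-Debug": "1"},            # merged into request headers
    extra_query={"trace": "true"},             # appended to the query string
    timeout=httpx.Timeout(10.0, connect=2.0),  # overrides the client default
)
# -----------------------------------------------------------------------------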
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAssistantsResourceWithStreamingResponse(self) - - async def create( - self, - *, - model: Union[str, AssistantSupportedModels], - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """ - Create an assistant with a model and instructions. - - Args: - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - description: The description of the assistant. The maximum length is 512 characters. - - instructions: The system instructions that the assistant uses. The maximum length is 256,000 - characters. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the assistant. The maximum length is 256 characters. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. 
Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per - assistant. Tools can be of types `code_interpreter`, `file_search`, or - `function`. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/assistants", - body=await async_maybe_transform( - { - "model": model, - "description": description, - "instructions": instructions, - "metadata": metadata, - "name": name, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - }, - assistant_create_params.AssistantCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - async def retrieve( - self, - assistant_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """ - Retrieves an assistant. 
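# --- [editor's aside, not part of the patch] --------------------------------
# The async resource mirrors the sync one method-for-method; only `await` and
# `async_maybe_transform` differ. A minimal driver against a resource that
# survives this patch, with the client kwarg assumed for illustration:
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK

async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK(api_key="...")  # assumed auth kwarg
    models = await client.models.list()
    print(models)

asyncio.run(main())
# -----------------------------------------------------------------------------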
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return await self._get( - f"/assistants/{assistant_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - async def update( - self, - assistant_id: str, - *, - description: Optional[str] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[str, AssistantSupportedModels] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantObject: - """Modifies an assistant. - - Args: - description: The description of the assistant. - - The maximum length is 512 characters. - - instructions: The system instructions that the assistant uses. The maximum length is 256,000 - characters. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - name: The name of the assistant. The maximum length is 256 characters. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). 
- - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per - assistant. Tools can be of types `code_interpreter`, `file_search`, or - `function`. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return await self._post( - f"/assistants/{assistant_id}", - body=await async_maybe_transform( - { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - }, - assistant_update_params.AssistantUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantObject, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantListResponse: - """Returns a list of assistants. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/assistants", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - assistant_list_params.AssistantListParams, - ), - ), - cast_to=AssistantListResponse, - ) - - async def delete( - self, - assistant_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantDeleteResponse: - """ - Delete an assistant. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not assistant_id: - raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") - return await self._delete( - f"/assistants/{assistant_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AssistantDeleteResponse, - ) - - -class AssistantsResourceWithRawResponse: - def __init__(self, assistants: AssistantsResource) -> None: - self._assistants = assistants - - self.create = to_raw_response_wrapper( - assistants.create, - ) - self.retrieve = to_raw_response_wrapper( - assistants.retrieve, - ) - self.update = to_raw_response_wrapper( - assistants.update, - ) - self.list = to_raw_response_wrapper( - assistants.list, - ) - self.delete = to_raw_response_wrapper( - assistants.delete, - ) - - -class AsyncAssistantsResourceWithRawResponse: - def __init__(self, assistants: AsyncAssistantsResource) -> None: - self._assistants = assistants - - self.create = async_to_raw_response_wrapper( - assistants.create, - ) - self.retrieve = async_to_raw_response_wrapper( - assistants.retrieve, - ) - self.update = async_to_raw_response_wrapper( - assistants.update, - ) - self.list = async_to_raw_response_wrapper( - assistants.list, - ) - self.delete = async_to_raw_response_wrapper( - assistants.delete, - ) - - -class AssistantsResourceWithStreamingResponse: - def __init__(self, assistants: AssistantsResource) -> None: - self._assistants = assistants - - self.create = to_streamed_response_wrapper( - assistants.create, - ) - self.retrieve = to_streamed_response_wrapper( - assistants.retrieve, - ) - self.update = to_streamed_response_wrapper( - assistants.update, - ) - self.list = to_streamed_response_wrapper( - assistants.list, - ) - self.delete = to_streamed_response_wrapper( - assistants.delete, - ) - - -class AsyncAssistantsResourceWithStreamingResponse: - def __init__(self, assistants: AsyncAssistantsResource) -> None: - self._assistants = assistants - - self.create = async_to_streamed_response_wrapper( - assistants.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - assistants.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - assistants.update, - ) - self.list = async_to_streamed_response_wrapper( - assistants.list, - ) - self.delete = async_to_streamed_response_wrapper( - assistants.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/audio.py b/src/digitalocean_genai_sdk/resources/audio.py deleted file mode 100644 index bf023ba0..00000000 --- a/src/digitalocean_genai_sdk/resources/audio.py +++ /dev/null @@ -1,650 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
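# --- [editor's aside, not part of the patch] --------------------------------
# The *WithRawResponse / *WithStreamingResponse classes deleted above are thin
# method tables: each rebinds the same resource methods through a wrapper that
# changes only what a call returns. A self-contained toy of that structure
# (the real wrappers return httpx-backed response objects, not a dict):
import functools

def to_raw_wrapper(fn):
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        parsed = fn(*args, **kwargs)
        return {"parsed": parsed, "status_code": 200}  # stand-in raw response
    return wrapped

class ToyResource:
    def retrieve(self, obj_id: str) -> str:
        return f"object {obj_id}"

class ToyResourceWithRawResponse:
    def __init__(self, resource: ToyResource) -> None:
        self.retrieve = to_raw_wrapper(resource.retrieve)

assert ToyResourceWithRawResponse(ToyResource()).retrieve("abc")["parsed"] == "object abc"
# -----------------------------------------------------------------------------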
- -from __future__ import annotations - -from typing import Any, List, Union, Mapping, Optional, cast -from typing_extensions import Literal - -import httpx - -from ..types import ( - audio_generate_speech_params, - audio_translate_audio_params, - audio_transcribe_audio_params, -) -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - to_custom_raw_response_wrapper, - async_to_streamed_response_wrapper, - to_custom_streamed_response_wrapper, - async_to_custom_raw_response_wrapper, - async_to_custom_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.voice_ids_shared_param import VoiceIDsSharedParam -from ..types.audio_translate_audio_response import AudioTranslateAudioResponse -from ..types.audio_transcribe_audio_response import AudioTranscribeAudioResponse - -__all__ = ["AudioResource", "AsyncAudioResource"] - - -class AudioResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> AudioResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AudioResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AudioResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AudioResourceWithStreamingResponse(self) - - def generate_speech( - self, - *, - input: str, - model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]], - voice: VoiceIDsSharedParam, - instructions: str | NotGiven = NOT_GIVEN, - response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, - speed: float | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BinaryAPIResponse: - """ - Generates audio from the input text. - - Args: - input: The text to generate audio for. The maximum length is 4096 characters. - - model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or - `gpt-4o-mini-tts`. - - voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. Previews of the voices are available in the - [Text to speech guide](/docs/guides/text-to-speech#voice-options). - - instructions: Control the voice of your generated audio with additional instructions. Does not - work with `tts-1` or `tts-1-hd`. - - response_format: The format to audio in. 
Supported formats are `mp3`, `opus`, `aac`, `flac`, - `wav`, and `pcm`. - - speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - the default. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return self._post( - "/audio/speech", - body=maybe_transform( - { - "input": input, - "model": model, - "voice": voice, - "instructions": instructions, - "response_format": response_format, - "speed": speed, - }, - audio_generate_speech_params.AudioGenerateSpeechParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=BinaryAPIResponse, - ) - - def transcribe_audio( - self, - *, - file: FileTypes, - model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]], - include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AudioTranscribeAudioResponse: - """ - Transcribes audio into the input language. - - Args: - file: - The audio file object (not file name) to transcribe, in one of these formats: - flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - - model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). - - include: Additional information to include in the transcription response. `logprobs` will - return the log probabilities of the tokens in the response to understand the - model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. - - language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - - prompt: An optional text to guide the model's style or continue a previous audio - segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the - audio language. - - response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
- See the - [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) - for more information. - - Note: Streaming is not supported for the `whisper-1` model and will be ignored. - - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - output more random, while lower values like 0.2 will make it more focused and - deterministic. If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - - timestamp_granularities: The timestamp granularities to populate for this transcription. - `response_format` must be set `verbose_json` to use timestamp granularities. - Either or both of these options are supported: `word`, or `segment`. Note: There - is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "model": model, - "include": include, - "language": language, - "prompt": prompt, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "timestamp_granularities": timestamp_granularities, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return cast( - AudioTranscribeAudioResponse, - self._post( - "/audio/transcriptions", - body=maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, AudioTranscribeAudioResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - def translate_audio( - self, - *, - file: FileTypes, - model: Union[str, Literal["whisper-1"]], - prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AudioTranslateAudioResponse: - """ - Translates audio into English. - - Args: - file: The audio file object (not file name) translate, in one of these formats: flac, - mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - - model: ID of the model to use. Only `whisper-1` (which is powered by our open source - Whisper V2 model) is currently available. - - prompt: An optional text to guide the model's style or continue a previous audio - segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in - English. 
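# --- [editor's aside, not part of the patch] --------------------------------
# What the `extract_files` + multipart/form-data plumbing above amounts to on
# the wire, expressed directly with httpx (the transport this SDK builds on).
# The URL and auth header are illustrative assumptions; httpx appends the
# `boundary` parameter to the Content-Type for you:
import httpx

with open("speech.mp3", "rb") as f:
    resp = httpx.post(
        "https://example.invalid/audio/translations",  # assumed endpoint
        headers={"Authorization": "Bearer ..."},
        data={"model": "whisper-1", "response_format": "json"},  # form fields
        files={"file": ("speech.mp3", f, "audio/mpeg")},         # file part
    )
resp.raise_for_status()
print(resp.json())
# -----------------------------------------------------------------------------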
- - response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. - - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - output more random, while lower values like 0.2 will make it more focused and - deterministic. If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "model": model, - "prompt": prompt, - "response_format": response_format, - "temperature": temperature, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return cast( - AudioTranslateAudioResponse, - self._post( - "/audio/translations", - body=maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, AudioTranslateAudioResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - -class AsyncAudioResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAudioResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAudioResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAudioResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAudioResourceWithStreamingResponse(self) - - async def generate_speech( - self, - *, - input: str, - model: Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]], - voice: VoiceIDsSharedParam, - instructions: str | NotGiven = NOT_GIVEN, - response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, - speed: float | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncBinaryAPIResponse: - """ - Generates audio from the input text. - - Args: - input: The text to generate audio for. The maximum length is 4096 characters. - - model: One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or - `gpt-4o-mini-tts`. 
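# --- [editor's aside, not part of the patch] --------------------------------
# `generate_speech` is the one method in this deleted file that returns a
# binary response rather than a parsed model (note the Accept:
# application/octet-stream header it sets). A sketch of how it was called
# before this deletion, with the client kwarg and the `.read()` accessor on
# BinaryAPIResponse assumed rather than guaranteed:
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK(api_key="...")  # assumed auth kwarg
speech = client.audio.generate_speech(
    input="Hello from the docs.",
    model="tts-1",
    voice="alloy",
    response_format="mp3",
)
with open("hello.mp3", "wb") as f:
    f.write(speech.read())  # binary responses expose the raw audio bytes
# -----------------------------------------------------------------------------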
- - voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. Previews of the voices are available in the - [Text to speech guide](/docs/guides/text-to-speech#voice-options). - - instructions: Control the voice of your generated audio with additional instructions. Does not - work with `tts-1` or `tts-1-hd`. - - response_format: The format of the output audio. Supported formats are `mp3`, `opus`, `aac`, `flac`, - `wav`, and `pcm`. - - speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - the default. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})} - return await self._post( - "/audio/speech", - body=await async_maybe_transform( - { - "input": input, - "model": model, - "voice": voice, - "instructions": instructions, - "response_format": response_format, - "speed": speed, - }, - audio_generate_speech_params.AudioGenerateSpeechParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AsyncBinaryAPIResponse, - ) - - async def transcribe_audio( - self, - *, - file: FileTypes, - model: Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]], - include: List[Literal["logprobs"]] | NotGiven = NOT_GIVEN, - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AudioTranscribeAudioResponse: - """ - Transcribes audio into the input language. - - Args: - file: - The audio file object (not file name) to transcribe, in one of these formats: - flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - - model: ID of the model to use. The options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source - Whisper V2 model). - - include: Additional information to include in the transcription response. `logprobs` will - return the log probabilities of the tokens in the response to understand the - model's confidence in the transcription. `logprobs` only works with - response_format set to `json` and only with the models `gpt-4o-transcribe` and - `gpt-4o-mini-transcribe`. - - language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - - prompt: An optional text to guide the model's style or continue a previous audio - segment.
The [prompt](/docs/guides/speech-to-text#prompting) should match the - audio language. - - response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the - [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) - for more information. - - Note: Streaming is not supported for the `whisper-1` model and will be ignored. - - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - output more random, while lower values like 0.2 will make it more focused and - deterministic. If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - - timestamp_granularities: The timestamp granularities to populate for this transcription. - `response_format` must be set to `verbose_json` to use timestamp granularities. - Either or both of these options are supported: `word`, or `segment`. Note: There - is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "model": model, - "include": include, - "language": language, - "prompt": prompt, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "timestamp_granularities": timestamp_granularities, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return cast( - AudioTranscribeAudioResponse, - await self._post( - "/audio/transcriptions", - body=await async_maybe_transform(body, audio_transcribe_audio_params.AudioTranscribeAudioParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, AudioTranscribeAudioResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - async def translate_audio( - self, - *, - file: FileTypes, - model: Union[str, Literal["whisper-1"]], - prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AudioTranslateAudioResponse: - """ - Translates audio into English. - - Args: - file: The audio file object (not file name) to translate, in one of these formats: flac, - mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - - model: ID of the model to use. Only `whisper-1` (which is powered by our open source - Whisper V2 model) is currently available. - - prompt: An optional text to guide the model's style or continue a previous audio - segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in - English. - - response_format: The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. - - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - output more random, while lower values like 0.2 will make it more focused and - deterministic. If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "model": model, - "prompt": prompt, - "response_format": response_format, - "temperature": temperature, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return cast( - AudioTranslateAudioResponse, - await self._post( - "/audio/translations", - body=await async_maybe_transform(body, audio_translate_audio_params.AudioTranslateAudioParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, AudioTranslateAudioResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - -class AudioResourceWithRawResponse: - def __init__(self, audio: AudioResource) -> None: - self._audio = audio - - self.generate_speech = to_custom_raw_response_wrapper( - audio.generate_speech, - BinaryAPIResponse, - ) - self.transcribe_audio = to_raw_response_wrapper( - audio.transcribe_audio, - ) - self.translate_audio = to_raw_response_wrapper( - audio.translate_audio, - ) - - -class AsyncAudioResourceWithRawResponse: - def __init__(self, audio: AsyncAudioResource) -> None: - self._audio = audio - - self.generate_speech = async_to_custom_raw_response_wrapper( - audio.generate_speech, - AsyncBinaryAPIResponse, - ) - self.transcribe_audio = async_to_raw_response_wrapper( - audio.transcribe_audio, - ) - self.translate_audio = async_to_raw_response_wrapper( - audio.translate_audio, - ) - - -class AudioResourceWithStreamingResponse: - def __init__(self, audio: AudioResource) -> None: - self._audio = audio - - self.generate_speech = to_custom_streamed_response_wrapper( - audio.generate_speech, - StreamedBinaryAPIResponse, - ) - self.transcribe_audio = to_streamed_response_wrapper( - audio.transcribe_audio, - ) - self.translate_audio = to_streamed_response_wrapper( - audio.translate_audio, - ) - - -class AsyncAudioResourceWithStreamingResponse: - def __init__(self, audio: AsyncAudioResource) -> None: - self._audio = audio - - self.generate_speech = async_to_custom_streamed_response_wrapper( - audio.generate_speech, - AsyncStreamedBinaryAPIResponse, - ) - self.transcribe_audio = async_to_streamed_response_wrapper( - audio.transcribe_audio, - ) - self.translate_audio = async_to_streamed_response_wrapper( - audio.translate_audio, - ) diff --git a/src/digitalocean_genai_sdk/resources/batches.py b/src/digitalocean_genai_sdk/resources/batches.py deleted file mode 100644 index bd92c365..00000000 --- a/src/digitalocean_genai_sdk/resources/batches.py +++ /dev/null @@ -1,513 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
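Before the batches resource, a minimal usage sketch for the audio methods above. The client class name (`DigitaloceanGenaiSDK`) and its zero-argument construction are assumptions based on the package layout, not confirmed by this patch:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()

# Transcription: segment timestamps require response_format="verbose_json"
# and add no extra latency, per the docstring above.
with open("interview.mp3", "rb") as audio_file:
    transcript = client.audio.transcribe_audio(
        file=audio_file,
        model="whisper-1",
        response_format="verbose_json",
        timestamp_granularities=["segment"],
    )

# Translation always produces English text.
with open("discours.mp3", "rb") as audio_file:
    translation = client.audio.translate_audio(file=audio_file, model="whisper-1")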
- -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Literal - -import httpx - -from ..types import batch_list_params, batch_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..types.batch import Batch -from .._base_client import make_request_options -from ..types.batch_list_response import BatchListResponse - -__all__ = ["BatchesResource", "AsyncBatchesResource"] - - -class BatchesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> BatchesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return BatchesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> BatchesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return BatchesResourceWithStreamingResponse(self) - - def create( - self, - *, - completion_window: Literal["24h"], - endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], - input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Creates and executes a batch from an uploaded file of requests - - Args: - completion_window: The time frame within which the batch should be processed. Currently only `24h` - is supported. - - endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - are supported. Note that `/v1/embeddings` batches are also restricted to a - maximum of 50,000 embedding inputs across all requests in the batch. - - input_file_id: The ID of an uploaded file that contains requests for the new batch. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your input file must be formatted as a - [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with - the purpose `batch`. The file can contain up to 50,000 requests, and can be up - to 200 MB in size. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
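A hedged illustration of the `create` call documented above; the client class is the assumed `DigitaloceanGenaiSDK`, and the file ID is a placeholder for a JSONL file already uploaded with purpose `batch`:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()
batch = client.batches.create(
    completion_window="24h",  # currently the only supported window
    endpoint="/v1/chat/completions",
    input_file_id="file-abc123",  # placeholder ID of the uploaded JSONL file
    metadata={"team": "analytics"},  # optional, up to 16 key-value pairs
)
print(batch.id)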
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/batches", - body=maybe_transform( - { - "completion_window": completion_window, - "endpoint": endpoint, - "input_file_id": input_file_id, - "metadata": metadata, - }, - batch_create_params.BatchCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - def retrieve( - self, - batch_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Retrieves a batch. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._get( - f"/batches/{batch_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BatchListResponse: - """List your organization's batches. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/batches", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - batch_list_params.BatchListParams, - ), - ), - cast_to=BatchListResponse, - ) - - def cancel( - self, - batch_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """Cancels an in-progress batch. - - The batch will be in status `cancelling` for up to - 10 minutes, before changing to `cancelled`, where it will have partial results - (if any) available in the output file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._post( - f"/batches/{batch_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - -class AsyncBatchesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncBatchesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncBatchesResourceWithStreamingResponse(self) - - async def create( - self, - *, - completion_window: Literal["24h"], - endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], - input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Creates and executes a batch from an uploaded file of requests - - Args: - completion_window: The time frame within which the batch should be processed. Currently only `24h` - is supported. - - endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` - are supported. Note that `/v1/embeddings` batches are also restricted to a - maximum of 50,000 embedding inputs across all requests in the batch. - - input_file_id: The ID of an uploaded file that contains requests for the new batch. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your input file must be formatted as a - [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with - the purpose `batch`. The file can contain up to 50,000 requests, and can be up - to 200 MB in size. - - metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/batches", - body=await async_maybe_transform( - { - "completion_window": completion_window, - "endpoint": endpoint, - "input_file_id": input_file_id, - "metadata": metadata, - }, - batch_create_params.BatchCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - async def retrieve( - self, - batch_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Retrieves a batch. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return await self._get( - f"/batches/{batch_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BatchListResponse: - """List your organization's batches. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. 
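A cursor-pagination sketch built on the `after` and `limit` parameters described above; the `data` and `has_more` fields on `BatchListResponse` are assumptions based on the usual list-response shape, not guarantees from this patch:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()
page = client.batches.list(limit=100)
while True:
    for batch in page.data:  # `data` is assumed to hold Batch objects
        print(batch.id, batch.status)
    if not page.has_more:  # `has_more` is assumed; stop when the cursor is exhausted
        break
    page = client.batches.list(after=page.data[-1].id, limit=100)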
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/batches", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - batch_list_params.BatchListParams, - ), - ), - cast_to=BatchListResponse, - ) - - async def cancel( - self, - batch_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """Cancels an in-progress batch. - - The batch will be in status `cancelling` for up to - 10 minutes, before changing to `cancelled`, where it will have partial results - (if any) available in the output file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return await self._post( - f"/batches/{batch_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Batch, - ) - - -class BatchesResourceWithRawResponse: - def __init__(self, batches: BatchesResource) -> None: - self._batches = batches - - self.create = to_raw_response_wrapper( - batches.create, - ) - self.retrieve = to_raw_response_wrapper( - batches.retrieve, - ) - self.list = to_raw_response_wrapper( - batches.list, - ) - self.cancel = to_raw_response_wrapper( - batches.cancel, - ) - - -class AsyncBatchesResourceWithRawResponse: - def __init__(self, batches: AsyncBatchesResource) -> None: - self._batches = batches - - self.create = async_to_raw_response_wrapper( - batches.create, - ) - self.retrieve = async_to_raw_response_wrapper( - batches.retrieve, - ) - self.list = async_to_raw_response_wrapper( - batches.list, - ) - self.cancel = async_to_raw_response_wrapper( - batches.cancel, - ) - - -class BatchesResourceWithStreamingResponse: - def __init__(self, batches: BatchesResource) -> None: - self._batches = batches - - self.create = to_streamed_response_wrapper( - batches.create, - ) - self.retrieve = to_streamed_response_wrapper( - batches.retrieve, - ) - self.list = to_streamed_response_wrapper( - batches.list, - ) - self.cancel = to_streamed_response_wrapper( - batches.cancel, - ) - - -class AsyncBatchesResourceWithStreamingResponse: - def __init__(self, batches: AsyncBatchesResource) -> None: - self._batches = batches - - self.create = async_to_streamed_response_wrapper( - batches.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - batches.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - batches.list, - ) - self.cancel = async_to_streamed_response_wrapper( - batches.cancel, - ) diff --git 
a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat/completions.py index b89b8f9e..effaf077 100644 --- a/src/digitalocean_genai_sdk/resources/chat/completions.py +++ b/src/digitalocean_genai_sdk/resources/chat/completions.py @@ -2,12 +2,10 @@ from __future__ import annotations -from typing import Dict, List, Iterable, Optional -from typing_extensions import Literal +from typing import Dict, Iterable, Optional import httpx -from ...types import ReasoningEffort from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property @@ -18,21 +16,11 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...types.chat import ( - completion_list_params, - completion_create_params, - completion_update_params, - completion_list_messages_params, -) +from ...types.chat import completion_create_params from ..._base_client import make_request_options -from ...types.reasoning_effort import ReasoningEffort from ...types.chat.create_response import CreateResponse from ...types.stop_configuration_param import StopConfigurationParam -from ...types.chat.model_ids_shared_param import ModelIDsSharedParam -from ...types.chat.completion_list_response import CompletionListResponse -from ...types.chat.completion_delete_response import CompletionDeleteResponse from ...types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam -from ...types.chat.completion_list_messages_response import CompletionListMessagesResponse __all__ = ["CompletionsResource", "AsyncCompletionsResource"] @@ -61,36 +49,22 @@ def create( self, *, messages: Iterable[completion_create_params.Message], - model: ModelIDsSharedParam, - audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN, + model: str, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, - 
web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -99,60 +73,17 @@ def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateResponse: """ - **Starting a new project?** We recommend trying - [Responses](/docs/api-reference/responses) to take advantage of the latest - OpenAI platform features. Compare - [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). - - --- - - Creates a model response for the given chat conversation. Learn more in the - [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), - and [audio](/docs/guides/audio) guides. - - Parameter support can differ depending on the model used to generate the - response, particularly for newer reasoning models. Parameters that are only - supported for reasoning models are noted below. For the current state of - unsupported parameters in reasoning models, - [refer to the reasoning guide](/docs/guides/reasoning). + Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. Depending on the - [model](/docs/models) you use, different message types (modalities) are - supported, like [text](/docs/guides/text-generation), - [images](/docs/guides/vision), and [audio](/docs/guides/audio). - - model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the [model guide](/docs/models) to browse and compare - available models. + messages: A list of messages comprising the conversation so far. - audio: Parameters for audio output. Required when audio output is requested with - `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + model: Model ID used to generate the response. frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - function_call: Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. - - `none` means the model will not call a function and instead generates a message. - - `auto` means the model can pick between generating a message or calling a - function. - - Specifying a particular function via `{"name": "my_function"}` forces the model - to call that function. - - `none` is the default when no functions are present. `auto` is the default if - functions are present. - - functions: Deprecated in favor of `tools`. - - A list of functions the model may generate JSON inputs for. - logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the @@ -166,15 +97,14 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. - max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). 
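The hunk below swaps the reasoning-token wording for run-scoped semantics: `max_completion_tokens` caps only generated tokens, while the older `max_tokens` must fit alongside the prompt within the model's context length. A hedged sketch, with an assumed client class and a placeholder model ID:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Summarize the meeting notes in one line."}],
    model="llama3-8b-instruct",  # placeholder model ID
    max_completion_tokens=128,   # bounds generated tokens only
    # max_tokens=512,            # legacy cap: prompt tokens + 512 must fit the context window
)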
+ max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. - max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. This value can be used to control - [costs](https://openai.com/api/pricing/) for text generated via API. + max_tokens: The maximum number of tokens that can be generated in the completion. - This value is now deprecated in favor of `max_completion_tokens`, and is not - compatible with [o1 series models](/docs/guides/reasoning). + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and @@ -183,84 +113,19 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate. Most models are capable - of generating text, which is the default: - - `["text"]` - - The `gpt-4o-audio-preview` model can also be used to - [generate audio](/docs/guides/audio). To request that this model generate both - text and audio responses, you can use: - - `["text", "audio"]` - n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - prediction: Static predicted output content, such as the content of a text file that is - being regenerated. - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: An object specifying the format that the model must output. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - - seed: This feature is in Beta. If specified, our system will make a best effort to - sample deterministically, such that repeated requests with the same `seed` and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the `system_fingerprint` response parameter to monitor changes - in the backend. - - service_tier: Specifies the latency tier to use for processing the request. 
This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - store: Whether or not to store the output of this chat completion request for use in - our [model distillation](/docs/guides/distillation) or - [evals](/docs/guides/evals) products. - stream: If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the [Streaming section below](/docs/api-reference/chat/streaming) for more - information, along with the - [streaming responses](/docs/guides/streaming-responses) guide for more - information on how to handle the streaming events. + generated using server-sent events. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -269,20 +134,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -293,11 +144,8 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - web_search_options: This tool searches the web for relevant results to use in a response. Learn more - about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse.
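Putting the surviving parameters together, a hedged end-to-end sketch of the trimmed-down `create` (assumed client class, placeholder model ID; message dicts follow the usual role/content shape):

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()
completion = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name three Atlantic islands."},
    ],
    model="llama3-8b-instruct",  # placeholder model ID
    temperature=0.2,             # tune this or top_p, not both
    stop=["\n\n"],               # the returned text will not contain this sequence
    user="end-user-1234",        # stable ID that helps DigitalOcean detect abuse
)
print(completion.choices[0].message.content)  # assumes the usual choices shape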
extra_headers: Send extra headers @@ -313,35 +161,21 @@ def create( { "messages": messages, "model": model, - "audio": audio, "frequency_penalty": frequency_penalty, - "function_call": function_call, - "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "metadata": metadata, - "modalities": modalities, "n": n, - "parallel_tool_calls": parallel_tool_calls, - "prediction": prediction, "presence_penalty": presence_penalty, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "seed": seed, - "service_tier": service_tier, "stop": stop, - "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, - "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, ), @@ -351,241 +185,6 @@ def create( cast_to=CreateResponse, ) - def retrieve( - self, - completion_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: - """Get a stored chat completion. - - Only Chat Completions that have been created with - the `store` parameter set to `true` will be returned. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return self._get( - f"/chat/completions/{completion_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateResponse, - ) - - def update( - self, - completion_id: str, - *, - metadata: Optional[Dict[str, str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: - """Modify a stored chat completion. - - Only Chat Completions that have been created - with the `store` parameter set to `true` can be modified. Currently, the only - supported modification is to update the `metadata` field. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
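Back on the surviving `create` surface, the kept `logit_bias`, `logprobs`, and `top_logprobs` parameters compose as below; the token IDs are placeholders, since real IDs depend on the model's tokenizer:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class

client = DigitaloceanGenaiSDK()
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Pick a color."}],
    model="llama3-8b-instruct",  # placeholder model ID
    logit_bias={"15339": -100, "9891": 5},  # placeholder token IDs; -100 effectively bans a token
    logprobs=True,
    top_logprobs=3,  # requires logprobs=True, per the docstring above
)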
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return self._post( - f"/chat/completions/{completion_id}", - body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateResponse, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListResponse: - """List stored Chat Completions. - - Only Chat Completions that have been stored with - the `store` parameter set to `true` will be returned. - - Args: - after: Identifier for the last chat completion from the previous pagination request. - - limit: Number of Chat Completions to retrieve. - - metadata: - A list of metadata keys to filter the Chat Completions by. Example: - - `metadata[key1]=value1&metadata[key2]=value2` - - model: The model used to generate the Chat Completions. - - order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - `desc` for descending order. Defaults to `asc`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/chat/completions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "metadata": metadata, - "model": model, - "order": order, - }, - completion_list_params.CompletionListParams, - ), - ), - cast_to=CompletionListResponse, - ) - - def delete( - self, - completion_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionDeleteResponse: - """Delete a stored chat completion. - - Only Chat Completions that have been created - with the `store` parameter set to `true` can be deleted. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return self._delete( - f"/chat/completions/{completion_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CompletionDeleteResponse, - ) - - def list_messages( - self, - completion_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListMessagesResponse: - """Get the messages in a stored chat completion. - - Only Chat Completions that have - been created with the `store` parameter set to `true` will be returned. - - Args: - after: Identifier for the last message from the previous pagination request. - - limit: Number of messages to retrieve. - - order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - for descending order. Defaults to `asc`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return self._get( - f"/chat/completions/{completion_id}/messages", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - }, - completion_list_messages_params.CompletionListMessagesParams, - ), - ), - cast_to=CompletionListMessagesResponse, - ) - class AsyncCompletionsResource(AsyncAPIResource): @cached_property @@ -611,36 +210,22 @@ async def create( self, *, messages: Iterable[completion_create_params.Message], - model: ModelIDsSharedParam, - audio: Optional[completion_create_params.Audio] | NotGiven = NOT_GIVEN, + model: str, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[completion_create_params.Prediction] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven 
= NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -649,60 +234,17 @@ async def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateResponse: """ - **Starting a new project?** We recommend trying - [Responses](/docs/api-reference/responses) to take advantage of the latest - OpenAI platform features. Compare - [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). - - --- - - Creates a model response for the given chat conversation. Learn more in the - [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), - and [audio](/docs/guides/audio) guides. - - Parameter support can differ depending on the model used to generate the - response, particularly for newer reasoning models. Parameters that are only - supported for reasoning models are noted below. For the current state of - unsupported parameters in reasoning models, - [refer to the reasoning guide](/docs/guides/reasoning). + Creates a model response for the given chat conversation. Args: - messages: A list of messages comprising the conversation so far. Depending on the - [model](/docs/models) you use, different message types (modalities) are - supported, like [text](/docs/guides/text-generation), - [images](/docs/guides/vision), and [audio](/docs/guides/audio). - - model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the [model guide](/docs/models) to browse and compare - available models. + messages: A list of messages comprising the conversation so far. - audio: Parameters for audio output. Required when audio output is requested with - `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + model: Model ID used to generate the response. frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - function_call: Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. - - `none` means the model will not call a function and instead generates a message. - - `auto` means the model can pick between generating a message or calling a - function. 
- - Specifying a particular function via `{"name": "my_function"}` forces the model - to call that function. - - `none` is the default when no functions are present. `auto` is the default if - functions are present. - - functions: Deprecated in favor of `tools`. - - A list of functions the model may generate JSON inputs for. - logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the @@ -716,15 +258,14 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. - max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. - max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. This value can be used to control - [costs](https://openai.com/api/pricing/) for text generated via API. + max_tokens: The maximum number of tokens that can be generated in the completion. - This value is now deprecated in favor of `max_completion_tokens`, and is not - compatible with [o1 series models](/docs/guides/reasoning). + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and @@ -733,84 +274,19 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. - modalities: Output types that you would like the model to generate. Most models are capable - of generating text, which is the default: - - `["text"]` - - The `gpt-4o-audio-preview` model can also be used to - [generate audio](/docs/guides/audio). To request that this model generate both - text and audio responses, you can use: - - `["text", "audio"]` - n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - prediction: Static predicted output content, such as the content of a text file that is - being regenerated. - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: An object specifying the format that the model must output. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. 
Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - - seed: This feature is in Beta. If specified, our system will make a best effort to - sample deterministically, such that repeated requests with the same `seed` and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the `system_fingerprint` response parameter to monitor changes - in the backend. - - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarentee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - store: Whether or not to store the output of this chat completion request for use in - our [model distillation](/docs/guides/distillation) or - [evals](/docs/guides/evals) products. - stream: If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the [Streaming section below](/docs/api-reference/chat/streaming) for more - information, along with the - [streaming responses](/docs/guides/streaming-responses) guide for more - information on how to handle the streaming events. + generated using server-sent events. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -819,20 +295,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -843,11 +305,8 @@ async def create( We generally recommend altering this or `temperature` but not both. 
- user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - web_search_options: This tool searches the web for relevant results to use in a response. Learn more - about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. extra_headers: Send extra headers @@ -863,35 +322,21 @@ async def create( { "messages": messages, "model": model, - "audio": audio, "frequency_penalty": frequency_penalty, - "function_call": function_call, - "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, "metadata": metadata, - "modalities": modalities, "n": n, - "parallel_tool_calls": parallel_tool_calls, - "prediction": prediction, "presence_penalty": presence_penalty, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "seed": seed, - "service_tier": service_tier, "stop": stop, - "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, - "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, ), @@ -901,241 +346,6 @@ async def create( cast_to=CreateResponse, ) - async def retrieve( - self, - completion_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: - """Get a stored chat completion. - - Only Chat Completions that have been created with - the `store` parameter set to `true` will be returned. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return await self._get( - f"/chat/completions/{completion_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateResponse, - ) - - async def update( - self, - completion_id: str, - *, - metadata: Optional[Dict[str, str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: - """Modify a stored chat completion. - - Only Chat Completions that have been created - with the `store` parameter set to `true` can be modified. Currently, the only - supported modification is to update the `metadata` field. 
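To make the slimmed-down surface above concrete, here is a minimal usage sketch of the async `chat.completions.create` call as it stands after this change. The client class name `AsyncDigitaloceanGenaiSDK` and the model ID are assumptions (check `_client.py` for the actual exported name); everything else mirrors the signature above.

```python
# Hedged usage sketch -- client class name and model ID are assumptions.
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # exported name assumed


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()  # API key is read from the environment
    response = await client.chat.completions.create(
        model="llama3-8b-instruct",  # placeholder model ID
        messages=[{"role": "user", "content": "Say hello."}],
        max_completion_tokens=64,
        temperature=0.7,
    )
    print(response)


asyncio.run(main())
```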
- - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return await self._post( - f"/chat/completions/{completion_id}", - body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CreateResponse, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: str | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListResponse: - """List stored Chat Completions. - - Only Chat Completions that have been stored with - the `store` parameter set to `true` will be returned. - - Args: - after: Identifier for the last chat completion from the previous pagination request. - - limit: Number of Chat Completions to retrieve. - - metadata: - A list of metadata keys to filter the Chat Completions by. Example: - - `metadata[key1]=value1&metadata[key2]=value2` - - model: The model used to generate the Chat Completions. - - order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - `desc` for descending order. Defaults to `asc`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/chat/completions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "metadata": metadata, - "model": model, - "order": order, - }, - completion_list_params.CompletionListParams, - ), - ), - cast_to=CompletionListResponse, - ) - - async def delete( - self, - completion_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionDeleteResponse: - """Delete a stored chat completion. - - Only Chat Completions that have been created - with the `store` parameter set to `true` can be deleted. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return await self._delete( - f"/chat/completions/{completion_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CompletionDeleteResponse, - ) - - async def list_messages( - self, - completion_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionListMessagesResponse: - """Get the messages in a stored chat completion. - - Only Chat Completions that have - been created with the `store` parameter set to `true` will be returned. - - Args: - after: Identifier for the last message from the previous pagination request. - - limit: Number of messages to retrieve. - - order: Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - for descending order. Defaults to `asc`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not completion_id: - raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}") - return await self._get( - f"/chat/completions/{completion_id}/messages", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - }, - completion_list_messages_params.CompletionListMessagesParams, - ), - ), - cast_to=CompletionListMessagesResponse, - ) - class CompletionsResourceWithRawResponse: def __init__(self, completions: CompletionsResource) -> None: @@ -1144,21 +354,6 @@ def __init__(self, completions: CompletionsResource) -> None: self.create = to_raw_response_wrapper( completions.create, ) - self.retrieve = to_raw_response_wrapper( - completions.retrieve, - ) - self.update = to_raw_response_wrapper( - completions.update, - ) - self.list = to_raw_response_wrapper( - completions.list, - ) - self.delete = to_raw_response_wrapper( - completions.delete, - ) - self.list_messages = to_raw_response_wrapper( - completions.list_messages, - ) class AsyncCompletionsResourceWithRawResponse: @@ -1168,21 +363,6 @@ def __init__(self, completions: AsyncCompletionsResource) -> None: self.create = async_to_raw_response_wrapper( completions.create, ) - self.retrieve = async_to_raw_response_wrapper( - completions.retrieve, - ) - self.update = async_to_raw_response_wrapper( - completions.update, - ) - self.list = async_to_raw_response_wrapper( - completions.list, - ) - self.delete = async_to_raw_response_wrapper( - completions.delete, - ) - self.list_messages = async_to_raw_response_wrapper( - completions.list_messages, - ) class CompletionsResourceWithStreamingResponse: @@ -1192,21 +372,6 @@ def __init__(self, completions: CompletionsResource) -> None: self.create = to_streamed_response_wrapper( completions.create, ) - self.retrieve = to_streamed_response_wrapper( - completions.retrieve, - ) - self.update = to_streamed_response_wrapper( - completions.update, - ) - self.list = to_streamed_response_wrapper( - completions.list, - ) - self.delete = to_streamed_response_wrapper( - completions.delete, - ) - self.list_messages = to_streamed_response_wrapper( - completions.list_messages, - ) class AsyncCompletionsResourceWithStreamingResponse: @@ -1216,18 +381,3 @@ def __init__(self, completions: AsyncCompletionsResource) -> None: self.create = async_to_streamed_response_wrapper( completions.create, ) - self.retrieve = async_to_streamed_response_wrapper( - completions.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - completions.update, - ) - self.list = async_to_streamed_response_wrapper( - completions.list, - ) - self.delete = async_to_streamed_response_wrapper( - completions.delete, - ) - self.list_messages = async_to_streamed_response_wrapper( - completions.list_messages, - ) diff --git a/src/digitalocean_genai_sdk/resources/completions.py b/src/digitalocean_genai_sdk/resources/completions.py deleted file mode 100644 index cde13a53..00000000 --- a/src/digitalocean_genai_sdk/resources/completions.py +++ /dev/null @@ -1,460 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
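The wrapper classes above now expose only `create`. For readers unfamiliar with the Stainless pattern, a sketch of how the raw-response variant is typically used follows; the client class name is assumed, and the header access is illustrative.

```python
# Raw-response sketch: .with_raw_response returns the HTTP response wrapper,
# which can then be parsed into the typed model. Client class name assumed.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
raw = client.chat.completions.with_raw_response.create(
    model="llama3-8b-instruct",  # placeholder model ID
    messages=[{"role": "user", "content": "ping"}],
)
print(raw.headers.get("content-type"))  # inspect headers before parsing
completion = raw.parse()  # parse into the typed CreateResponse model
```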
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from ..types import completion_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.stop_configuration_param import StopConfigurationParam -from ..types.completion_create_response import CompletionCreateResponse -from ..types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam - -__all__ = ["CompletionsResource", "AsyncCompletionsResource"] - - -class CompletionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> CompletionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return CompletionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return CompletionsResourceWithStreamingResponse(self) - - def create( - self, - *, - model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse: - """ - Creates a completion for the provided prompt and parameters. - - Args: - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. 
- - prompt: The prompt(s) to generate completions for, encoded as a string, array of - strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during - training, so if a prompt is not specified the model will generate as if from the - beginning of a new document. - - best_of: Generates `best_of` completions server-side and returns the "best" (the one with - the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and - `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - - echo: Echo back the prompt in addition to the completion - - frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT - tokenizer) to an associated bias value from -100 to 100. You can use this - [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. - Mathematically, the bias is added to the logits generated by the model prior to - sampling. The exact effect will vary per model, but values between -1 and 1 - should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - from being generated. - - logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as - well the chosen tokens. For example, if `logprobs` is 5, the API will return a - list of the 5 most likely tokens. The API will always return the `logprob` of - the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - - max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the - completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. - - n: How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - - seed: If specified, our system will make a best effort to sample deterministically, - such that repeated requests with the same `seed` and parameters should return - the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` - response parameter to monitor changes in the backend. 
- - stop: Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - - stream: Whether to stream back partial progress. If set, tokens will be sent as - data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - suffix: The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/completions", - body=maybe_transform( - { - "model": model, - "prompt": prompt, - "best_of": best_of, - "echo": echo, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "presence_penalty": presence_penalty, - "seed": seed, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "suffix": suffix, - "temperature": temperature, - "top_p": top_p, - "user": user, - }, - completion_create_params.CompletionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CompletionCreateResponse, - ) - - -class AsyncCompletionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncCompletionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
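Since this file is deleted wholesale, a short reference sketch of the surface being removed may help reviewers confirm nothing still depends on it. The client class name is assumed, and this call path no longer exists after this patch.

```python
# Reference-only sketch of the removed legacy /completions surface.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Once upon a time",
    max_tokens=32,
    stop=["\n\n"],               # up to 4 stop sequences
    logit_bias={"50256": -100},  # effectively bans the <|endoftext|> token
)
```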
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncCompletionsResourceWithStreamingResponse(self) - - async def create( - self, - *, - model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], - best_of: Optional[int] | NotGiven = NOT_GIVEN, - echo: Optional[bool] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse: - """ - Creates a completion for the provided prompt and parameters. - - Args: - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - prompt: The prompt(s) to generate completions for, encoded as a string, array of - strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during - training, so if a prompt is not specified the model will generate as if from the - beginning of a new document. - - best_of: Generates `best_of` completions server-side and returns the "best" (the one with - the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and - `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - - echo: Echo back the prompt in addition to the completion - - frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT - tokenizer) to an associated bias value from -100 to 100. You can use this - [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
- Mathematically, the bias is added to the logits generated by the model prior to - sampling. The exact effect will vary per model, but values between -1 and 1 - should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - from being generated. - - logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as - well the chosen tokens. For example, if `logprobs` is 5, the API will return a - list of the 5 most likely tokens. The API will always return the `logprob` of - the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - - max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the - completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. - - n: How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - - seed: If specified, our system will make a best effort to sample deterministically, - such that repeated requests with the same `seed` and parameters should return - the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` - response parameter to monitor changes in the backend. - - stop: Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - - stream: Whether to stream back partial progress. If set, tokens will be sent as - data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - suffix: The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
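The `stream` description above (removed here along with the rest of this file) also applies to the chat surface, where `stream` and `stream_options` survive, so the request-shape sketch below targets that surviving surface. Note that the generated method is typed to return a single response model rather than an event stream, so chunk iteration is deliberately omitted; whether it behaves like comparable generated SDKs is an assumption, as is the `include_usage` field.

```python
# Streaming request sketch (shape only). Client class name is assumed, and the
# `include_usage` field is assumed from ChatCompletionStreamOptionsParam.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
response = client.chat.completions.create(
    model="llama3-8b-instruct",              # placeholder model ID
    messages=[{"role": "user", "content": "Count to five."}],
    stream=True,                             # server replies with SSE, terminated by data: [DONE]
    stream_options={"include_usage": True},  # only meaningful when stream=True
)
```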
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/completions", - body=await async_maybe_transform( - { - "model": model, - "prompt": prompt, - "best_of": best_of, - "echo": echo, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_tokens": max_tokens, - "n": n, - "presence_penalty": presence_penalty, - "seed": seed, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "suffix": suffix, - "temperature": temperature, - "top_p": top_p, - "user": user, - }, - completion_create_params.CompletionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=CompletionCreateResponse, - ) - - -class CompletionsResourceWithRawResponse: - def __init__(self, completions: CompletionsResource) -> None: - self._completions = completions - - self.create = to_raw_response_wrapper( - completions.create, - ) - - -class AsyncCompletionsResourceWithRawResponse: - def __init__(self, completions: AsyncCompletionsResource) -> None: - self._completions = completions - - self.create = async_to_raw_response_wrapper( - completions.create, - ) - - -class CompletionsResourceWithStreamingResponse: - def __init__(self, completions: CompletionsResource) -> None: - self._completions = completions - - self.create = to_streamed_response_wrapper( - completions.create, - ) - - -class AsyncCompletionsResourceWithStreamingResponse: - def __init__(self, completions: AsyncCompletionsResource) -> None: - self._completions = completions - - self.create = async_to_streamed_response_wrapper( - completions.create, - ) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py index 7dc90e34..1bcd3145 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/digitalocean_genai_sdk/resources/embeddings.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import List, Union, Iterable -from typing_extensions import Literal +from typing import List, Union import httpx @@ -47,10 +46,8 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: def create( self, *, - input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], - dimensions: int | NotGiven = NOT_GIVEN, - encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, + input: Union[str, List[str]], + model: str, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -64,26 +61,13 @@ def create( Args: input: Input text to embed, encoded as a string or array of tokens. To embed multiple - inputs in a single request, pass an array of strings or array of token arrays. - The input must not exceed the max input tokens for the model (8192 tokens for - `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - dimensions or less. 
- [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. + inputs in a single request, pass an array of strings. - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. + model: ID of the model to use. You can use the List models API to see all of your + available models. - dimensions: The number of dimensions the resulting output embeddings should have. Only - supported in `text-embedding-3` and later models. - - encoding_format: The format to return the embeddings in. Can be either `float` or - [`base64`](https://pypi.org/project/pybase64/). - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. extra_headers: Send extra headers @@ -99,8 +83,6 @@ def create( { "input": input, "model": model, - "dimensions": dimensions, - "encoding_format": encoding_format, "user": user, }, embedding_create_params.EmbeddingCreateParams, @@ -135,10 +117,8 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons async def create( self, *, - input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], - dimensions: int | NotGiven = NOT_GIVEN, - encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, + input: Union[str, List[str]], + model: str, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -152,26 +132,13 @@ async def create( Args: input: Input text to embed, encoded as a string or array of tokens. To embed multiple - inputs in a single request, pass an array of strings or array of token arrays. - The input must not exceed the max input tokens for the model (8192 tokens for - `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - dimensions or less. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. - - model: ID of the model to use. You can use the - [List models](/docs/api-reference/models/list) API to see all of your available - models, or see our [Model overview](/docs/models) for descriptions of them. - - dimensions: The number of dimensions the resulting output embeddings should have. Only - supported in `text-embedding-3` and later models. + inputs in a single request, pass an array of strings. - encoding_format: The format to return the embeddings in. Can be either `float` or - [`base64`](https://pypi.org/project/pybase64/). + model: ID of the model to use. You can use the List models API to see all of your + available models. - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
+ user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. extra_headers: Send extra headers @@ -187,8 +154,6 @@ async def create( { "input": input, "model": model, - "dimensions": dimensions, - "encoding_format": encoding_format, "user": user, }, embedding_create_params.EmbeddingCreateParams, diff --git a/src/digitalocean_genai_sdk/resources/files.py b/src/digitalocean_genai_sdk/resources/files.py deleted file mode 100644 index a93712fd..00000000 --- a/src/digitalocean_genai_sdk/resources/files.py +++ /dev/null @@ -1,608 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Mapping, cast -from typing_extensions import Literal - -import httpx - -from ..types import file_list_params, file_upload_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.openai_file import OpenAIFile -from ..types.file_list_response import FileListResponse -from ..types.file_delete_response import FileDeleteResponse - -__all__ = ["FilesResource", "AsyncFilesResource"] - - -class FilesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> FilesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FilesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FilesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FilesResourceWithStreamingResponse(self) - - def retrieve( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OpenAIFile: - """ - Returns information about a specific file. 
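Returning to the embeddings change above: with token-array inputs, `dimensions`, and `encoding_format` gone, the call reduces to a string-or-strings input and a free-form model ID. A minimal sketch, with the client class name and model ID as assumptions:

```python
# Minimal embeddings sketch for the simplified signature. Client name assumed.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
result = client.embeddings.create(
    model="text-embedding-model",  # placeholder model ID
    input=["first passage", "second passage"],
    user="user-1234",  # optional abuse-monitoring identifier
)
```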
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._get( - f"/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OpenAIFile, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - purpose: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileListResponse: - """Returns a list of files. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 10,000, and the default is 10,000. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - purpose: Only return files with the given purpose. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - "purpose": purpose, - }, - file_list_params.FileListParams, - ), - ), - cast_to=FileListResponse, - ) - - def delete( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileDeleteResponse: - """ - Delete a file. 
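The `after` cursor documented above composes into the usual pagination loop. A sketch follows, assuming the standard `data` list shape on `FileListResponse` and an assumed client class name.

```python
# Cursor-pagination sketch over files.list; the `data` attribute and its
# element fields (`id`, `purpose`) are assumed from the standard list shape.
from typing import Optional

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
after: Optional[str] = None
while True:
    page = client.files.list(limit=100, order="asc", **({"after": after} if after else {}))
    if not page.data:
        break
    for f in page.data:
        print(f.id, f.purpose)
    after = page.data[-1].id  # cursor for the next page
```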
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._delete( - f"/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileDeleteResponse, - ) - - def retrieve_content( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> str: - """ - Returns the contents of the specified file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._get( - f"/files/{file_id}/content", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=str, - ) - - def upload( - self, - *, - file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OpenAIFile: - """Upload a file that can be used across various endpoints. - - Individual files can be - up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. - - The Assistants API supports files up to 2 million tokens and of specific file - types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - - The Fine-tuning API only supports `.jsonl` files. The input also has certain - required formats for fine-tuning - [chat](/docs/api-reference/fine-tuning/chat-input) or - [completions](/docs/api-reference/fine-tuning/completions-input) models. - - The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - has a specific required [format](/docs/api-reference/batch/request-input). - - Please [contact us](https://help.openai.com/) if you need to increase these - storage limits. - - Args: - file: The File object (not file name) to be uploaded. - - purpose: The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the - Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - Flexible file type for any purpose - `evals`: Used for eval data sets - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "purpose": purpose, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/files", - body=maybe_transform(body, file_upload_params.FileUploadParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OpenAIFile, - ) - - -class AsyncFilesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFilesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFilesResourceWithStreamingResponse(self) - - async def retrieve( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OpenAIFile: - """ - Returns information about a specific file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._get( - f"/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OpenAIFile, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - purpose: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
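Putting the sync upload and content paths together: `upload` takes the file object itself (the SDK assembles the multipart body, including the boundary noted in the comment above), and `retrieve_content` is cast to `str`, so it returns the raw file body. The file name below is a placeholder and the client class name is assumed.

```python
# Upload-then-read sketch. Client class name and file name are assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # exported name assumed

client = DigitaloceanGenaiSDK()
with open("training.jsonl", "rb") as f:
    uploaded = client.files.upload(file=f, purpose="fine-tune")

text = client.files.retrieve_content(uploaded.id)  # returns the body as str
print(text[:200])
```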
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileListResponse: - """Returns a list of files. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 10,000, and the default is 10,000. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - purpose: Only return files with the given purpose. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - "purpose": purpose, - }, - file_list_params.FileListParams, - ), - ), - cast_to=FileListResponse, - ) - - async def delete( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileDeleteResponse: - """ - Delete a file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._delete( - f"/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileDeleteResponse, - ) - - async def retrieve_content( - self, - file_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> str: - """ - Returns the contents of the specified file. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._get( - f"/files/{file_id}/content", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=str, - ) - - async def upload( - self, - *, - file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OpenAIFile: - """Upload a file that can be used across various endpoints. - - Individual files can be - up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. - - The Assistants API supports files up to 2 million tokens and of specific file - types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - - The Fine-tuning API only supports `.jsonl` files. The input also has certain - required formats for fine-tuning - [chat](/docs/api-reference/fine-tuning/chat-input) or - [completions](/docs/api-reference/fine-tuning/completions-input) models. - - The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - has a specific required [format](/docs/api-reference/batch/request-input). - - Please [contact us](https://help.openai.com/) if you need to increase these - storage limits. - - Args: - file: The File object (not file name) to be uploaded. - - purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the - Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - Flexible file type for any purpose - `evals`: Used for eval data sets - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "file": file, - "purpose": purpose, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. 
- # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/files", - body=await async_maybe_transform(body, file_upload_params.FileUploadParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OpenAIFile, - ) - - -class FilesResourceWithRawResponse: - def __init__(self, files: FilesResource) -> None: - self._files = files - - self.retrieve = to_raw_response_wrapper( - files.retrieve, - ) - self.list = to_raw_response_wrapper( - files.list, - ) - self.delete = to_raw_response_wrapper( - files.delete, - ) - self.retrieve_content = to_raw_response_wrapper( - files.retrieve_content, - ) - self.upload = to_raw_response_wrapper( - files.upload, - ) - - -class AsyncFilesResourceWithRawResponse: - def __init__(self, files: AsyncFilesResource) -> None: - self._files = files - - self.retrieve = async_to_raw_response_wrapper( - files.retrieve, - ) - self.list = async_to_raw_response_wrapper( - files.list, - ) - self.delete = async_to_raw_response_wrapper( - files.delete, - ) - self.retrieve_content = async_to_raw_response_wrapper( - files.retrieve_content, - ) - self.upload = async_to_raw_response_wrapper( - files.upload, - ) - - -class FilesResourceWithStreamingResponse: - def __init__(self, files: FilesResource) -> None: - self._files = files - - self.retrieve = to_streamed_response_wrapper( - files.retrieve, - ) - self.list = to_streamed_response_wrapper( - files.list, - ) - self.delete = to_streamed_response_wrapper( - files.delete, - ) - self.retrieve_content = to_streamed_response_wrapper( - files.retrieve_content, - ) - self.upload = to_streamed_response_wrapper( - files.upload, - ) - - -class AsyncFilesResourceWithStreamingResponse: - def __init__(self, files: AsyncFilesResource) -> None: - self._files = files - - self.retrieve = async_to_streamed_response_wrapper( - files.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - files.list, - ) - self.delete = async_to_streamed_response_wrapper( - files.delete, - ) - self.retrieve_content = async_to_streamed_response_wrapper( - files.retrieve_content, - ) - self.upload = async_to_streamed_response_wrapper( - files.upload, - ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py deleted file mode 100644 index 5f198d2e..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
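Taken together, the file methods deleted above support a simple upload/inspect/delete round trip. A minimal sketch follows, using the synchronous resource (whose `retrieve`/`list`/`delete`/`retrieve_content`/`upload` methods are confirmed by the wrapper classes above); the client class name `DigitaloceanGenaiSDK` and the `id` field on the returned `OpenAIFile` are assumptions.

```python
# Sketch only: client class name is assumed; method names and parameters come
# from the deleted files resource above. The async variant mirrors this with `await`.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class name

client = DigitaloceanGenaiSDK()

# Upload a JSONL file; `purpose` must be one of the documented literals
# ("assistants", "batch", "fine-tune", "vision", "user_data", "evals").
with open("train.jsonl", "rb") as f:
    uploaded = client.files.upload(file=f, purpose="fine-tune")

# Cursor pagination: `after` takes the last object ID from the previous page.
page = client.files.list(limit=100, order="desc", purpose="fine-tune")

# Raw contents come back as `str`; delete the file when done.
contents = client.files.retrieve_content(uploaded.id)  # `.id` is an assumed field on OpenAIFile
client.files.delete(uploaded.id)
```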
- -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from .checkpoints import ( - CheckpointsResource, - AsyncCheckpointsResource, - CheckpointsResourceWithRawResponse, - AsyncCheckpointsResourceWithRawResponse, - CheckpointsResourceWithStreamingResponse, - AsyncCheckpointsResourceWithStreamingResponse, -) -from .fine_tuning import ( - FineTuningResource, - AsyncFineTuningResource, - FineTuningResourceWithRawResponse, - AsyncFineTuningResourceWithRawResponse, - FineTuningResourceWithStreamingResponse, - AsyncFineTuningResourceWithStreamingResponse, -) - -__all__ = [ - "CheckpointsResource", - "AsyncCheckpointsResource", - "CheckpointsResourceWithRawResponse", - "AsyncCheckpointsResourceWithRawResponse", - "CheckpointsResourceWithStreamingResponse", - "AsyncCheckpointsResourceWithStreamingResponse", - "JobsResource", - "AsyncJobsResource", - "JobsResourceWithRawResponse", - "AsyncJobsResourceWithRawResponse", - "JobsResourceWithStreamingResponse", - "AsyncJobsResourceWithStreamingResponse", - "FineTuningResource", - "AsyncFineTuningResource", - "FineTuningResourceWithRawResponse", - "AsyncFineTuningResourceWithRawResponse", - "FineTuningResourceWithStreamingResponse", - "AsyncFineTuningResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py deleted file mode 100644 index 3f6710f0..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .checkpoints import ( - CheckpointsResource, - AsyncCheckpointsResource, - CheckpointsResourceWithRawResponse, - AsyncCheckpointsResourceWithRawResponse, - CheckpointsResourceWithStreamingResponse, - AsyncCheckpointsResourceWithStreamingResponse, -) -from .permissions import ( - PermissionsResource, - AsyncPermissionsResource, - PermissionsResourceWithRawResponse, - AsyncPermissionsResourceWithRawResponse, - PermissionsResourceWithStreamingResponse, - AsyncPermissionsResourceWithStreamingResponse, -) - -__all__ = [ - "PermissionsResource", - "AsyncPermissionsResource", - "PermissionsResourceWithRawResponse", - "AsyncPermissionsResourceWithRawResponse", - "PermissionsResourceWithStreamingResponse", - "AsyncPermissionsResourceWithStreamingResponse", - "CheckpointsResource", - "AsyncCheckpointsResource", - "CheckpointsResourceWithRawResponse", - "AsyncCheckpointsResourceWithRawResponse", - "CheckpointsResourceWithStreamingResponse", - "AsyncCheckpointsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py deleted file mode 100644 index d2bd64ef..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/checkpoints.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
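The two `__init__.py` modules removed above exist only to re-export the resource classes; per their `__all__` lists, the names resolve directly from the subpackages, as in this short sketch:

```python
# Enabled by the re-exports above; both imports resolve to classes defined in
# fine_tuning.py and checkpoints/permissions.py respectively.
from digitalocean_genai_sdk.resources.fine_tuning import FineTuningResource
from digitalocean_genai_sdk.resources.fine_tuning.checkpoints import PermissionsResource
```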
- -from __future__ import annotations - -from ...._compat import cached_property -from .permissions import ( - PermissionsResource, - AsyncPermissionsResource, - PermissionsResourceWithRawResponse, - AsyncPermissionsResourceWithRawResponse, - PermissionsResourceWithStreamingResponse, - AsyncPermissionsResourceWithStreamingResponse, -) -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"] - - -class CheckpointsResource(SyncAPIResource): - @cached_property - def permissions(self) -> PermissionsResource: - return PermissionsResource(self._client) - - @cached_property - def with_raw_response(self) -> CheckpointsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return CheckpointsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return CheckpointsResourceWithStreamingResponse(self) - - -class AsyncCheckpointsResource(AsyncAPIResource): - @cached_property - def permissions(self) -> AsyncPermissionsResource: - return AsyncPermissionsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncCheckpointsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncCheckpointsResourceWithStreamingResponse(self) - - -class CheckpointsResourceWithRawResponse: - def __init__(self, checkpoints: CheckpointsResource) -> None: - self._checkpoints = checkpoints - - @cached_property - def permissions(self) -> PermissionsResourceWithRawResponse: - return PermissionsResourceWithRawResponse(self._checkpoints.permissions) - - -class AsyncCheckpointsResourceWithRawResponse: - def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: - self._checkpoints = checkpoints - - @cached_property - def permissions(self) -> AsyncPermissionsResourceWithRawResponse: - return AsyncPermissionsResourceWithRawResponse(self._checkpoints.permissions) - - -class CheckpointsResourceWithStreamingResponse: - def __init__(self, checkpoints: CheckpointsResource) -> None: - self._checkpoints = checkpoints - - @cached_property - def permissions(self) -> PermissionsResourceWithStreamingResponse: - return PermissionsResourceWithStreamingResponse(self._checkpoints.permissions) - - -class AsyncCheckpointsResourceWithStreamingResponse: - def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: - self._checkpoints = checkpoints - - @cached_property - def permissions(self) -> AsyncPermissionsResourceWithStreamingResponse: - return AsyncPermissionsResourceWithStreamingResponse(self._checkpoints.permissions) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py b/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py deleted file mode 100644 index 39e6a210..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/checkpoints/permissions.py +++ /dev/null @@ -1,401 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params -from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse -from ....types.fine_tuning.checkpoints.list_fine_tuning_checkpoint_permission import ListFineTuningCheckpointPermission - -__all__ = ["PermissionsResource", "AsyncPermissionsResource"] - - -class PermissionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> PermissionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return PermissionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> PermissionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return PermissionsResourceWithStreamingResponse(self) - - def create( - self, - permission_id: str, - *, - project_ids: List[str], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFineTuningCheckpointPermission: - """ - **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). - - This enables organization owners to share fine-tuned models with other projects - in their organization. - - Args: - project_ids: The project identifiers to grant access to. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return self._post( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListFineTuningCheckpointPermission, - ) - - def retrieve( - self, - permission_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFineTuningCheckpointPermission: - """ - **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). - - Organization owners can use this endpoint to view all permissions for a - fine-tuned model checkpoint. - - Args: - after: Identifier for the last permission ID from the previous pagination request. - - limit: Number of permissions to retrieve. - - order: The order in which to retrieve permissions. - - project_id: The ID of the project to get permissions for. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return self._get( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - "project_id": project_id, - }, - permission_retrieve_params.PermissionRetrieveParams, - ), - ), - cast_to=ListFineTuningCheckpointPermission, - ) - - def delete( - self, - permission_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> PermissionDeleteResponse: - """ - **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). - - Organization owners can use this endpoint to delete a permission for a - fine-tuned model checkpoint. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return self._delete( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=PermissionDeleteResponse, - ) - - -class AsyncPermissionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncPermissionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncPermissionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncPermissionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncPermissionsResourceWithStreamingResponse(self) - - async def create( - self, - permission_id: str, - *, - project_ids: List[str], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFineTuningCheckpointPermission: - """ - **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). - - This enables organization owners to share fine-tuned models with other projects - in their organization. - - Args: - project_ids: The project identifiers to grant access to. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return await self._post( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - body=await async_maybe_transform( - {"project_ids": project_ids}, permission_create_params.PermissionCreateParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ListFineTuningCheckpointPermission, - ) - - async def retrieve( - self, - permission_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["ascending", "descending"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListFineTuningCheckpointPermission: - """ - **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). - - Organization owners can use this endpoint to view all permissions for a - fine-tuned model checkpoint. - - Args: - after: Identifier for the last permission ID from the previous pagination request. - - limit: Number of permissions to retrieve. - - order: The order in which to retrieve permissions. - - project_id: The ID of the project to get permissions for. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return await self._get( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - "project_id": project_id, - }, - permission_retrieve_params.PermissionRetrieveParams, - ), - ), - cast_to=ListFineTuningCheckpointPermission, - ) - - async def delete( - self, - permission_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> PermissionDeleteResponse: - """ - **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). - - Organization owners can use this endpoint to delete a permission for a - fine-tuned model checkpoint. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not permission_id: - raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}") - return await self._delete( - f"/fine_tuning/checkpoints/{permission_id}/permissions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=PermissionDeleteResponse, - ) - - -class PermissionsResourceWithRawResponse: - def __init__(self, permissions: PermissionsResource) -> None: - self._permissions = permissions - - self.create = to_raw_response_wrapper( - permissions.create, - ) - self.retrieve = to_raw_response_wrapper( - permissions.retrieve, - ) - self.delete = to_raw_response_wrapper( - permissions.delete, - ) - - -class AsyncPermissionsResourceWithRawResponse: - def __init__(self, permissions: AsyncPermissionsResource) -> None: - self._permissions = permissions - - self.create = async_to_raw_response_wrapper( - permissions.create, - ) - self.retrieve = async_to_raw_response_wrapper( - permissions.retrieve, - ) - self.delete = async_to_raw_response_wrapper( - permissions.delete, - ) - - -class PermissionsResourceWithStreamingResponse: - def __init__(self, permissions: PermissionsResource) -> None: - self._permissions = permissions - - self.create = to_streamed_response_wrapper( - permissions.create, - ) - self.retrieve = to_streamed_response_wrapper( - permissions.retrieve, - ) - self.delete = to_streamed_response_wrapper( - permissions.delete, - ) - - -class AsyncPermissionsResourceWithStreamingResponse: - def __init__(self, permissions: AsyncPermissionsResource) -> None: - self._permissions = permissions - - self.create = async_to_streamed_response_wrapper( - permissions.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - permissions.retrieve, - ) - self.delete = async_to_streamed_response_wrapper( - permissions.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py b/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py deleted file mode 100644 index 9c097afe..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/fine_tuning.py +++ /dev/null @@ -1,134 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
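The permissions resource deleted above is the admin-only surface for sharing fine-tuned checkpoints across projects. A sketch of the full grant/inspect/revoke cycle, assuming the client class name and using hypothetical checkpoint and project identifiers:

```python
# Requires an admin API key per the docstrings above. The client class name is
# an assumption; the checkpoint and project IDs below are hypothetical.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
ckpt = "ft:gpt-4o-mini:org:custom:ckpt"  # hypothetical checkpoint identifier

# Grant two projects access to the fine-tuned checkpoint.
client.fine_tuning.checkpoints.permissions.create(ckpt, project_ids=["proj_a", "proj_b"])

# Page through existing permissions, oldest first.
perms = client.fine_tuning.checkpoints.permissions.retrieve(ckpt, order="ascending", limit=10)

# Revoke access again.
client.fine_tuning.checkpoints.permissions.delete(ckpt)
```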
- -from __future__ import annotations - -from ..._compat import cached_property -from .jobs.jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from .checkpoints.checkpoints import ( - CheckpointsResource, - AsyncCheckpointsResource, - CheckpointsResourceWithRawResponse, - AsyncCheckpointsResourceWithRawResponse, - CheckpointsResourceWithStreamingResponse, - AsyncCheckpointsResourceWithStreamingResponse, -) - -__all__ = ["FineTuningResource", "AsyncFineTuningResource"] - - -class FineTuningResource(SyncAPIResource): - @cached_property - def checkpoints(self) -> CheckpointsResource: - return CheckpointsResource(self._client) - - @cached_property - def jobs(self) -> JobsResource: - return JobsResource(self._client) - - @cached_property - def with_raw_response(self) -> FineTuningResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FineTuningResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FineTuningResourceWithStreamingResponse(self) - - -class AsyncFineTuningResource(AsyncAPIResource): - @cached_property - def checkpoints(self) -> AsyncCheckpointsResource: - return AsyncCheckpointsResource(self._client) - - @cached_property - def jobs(self) -> AsyncJobsResource: - return AsyncJobsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFineTuningResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFineTuningResourceWithStreamingResponse(self) - - -class FineTuningResourceWithRawResponse: - def __init__(self, fine_tuning: FineTuningResource) -> None: - self._fine_tuning = fine_tuning - - @cached_property - def checkpoints(self) -> CheckpointsResourceWithRawResponse: - return CheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints) - - @cached_property - def jobs(self) -> JobsResourceWithRawResponse: - return JobsResourceWithRawResponse(self._fine_tuning.jobs) - - -class AsyncFineTuningResourceWithRawResponse: - def __init__(self, fine_tuning: AsyncFineTuningResource) -> None: - self._fine_tuning = fine_tuning - - @cached_property - def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse: - return AsyncCheckpointsResourceWithRawResponse(self._fine_tuning.checkpoints) - - @cached_property - def jobs(self) -> AsyncJobsResourceWithRawResponse: - return AsyncJobsResourceWithRawResponse(self._fine_tuning.jobs) - - -class FineTuningResourceWithStreamingResponse: - def __init__(self, fine_tuning: FineTuningResource) -> None: - self._fine_tuning = fine_tuning - - @cached_property - def checkpoints(self) -> CheckpointsResourceWithStreamingResponse: - return CheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints) - - @cached_property - def jobs(self) -> JobsResourceWithStreamingResponse: - return JobsResourceWithStreamingResponse(self._fine_tuning.jobs) - - -class AsyncFineTuningResourceWithStreamingResponse: - def __init__(self, fine_tuning: AsyncFineTuningResource) -> None: - self._fine_tuning = fine_tuning - - @cached_property - def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse: - return AsyncCheckpointsResourceWithStreamingResponse(self._fine_tuning.checkpoints) - - @cached_property - def jobs(self) -> AsyncJobsResourceWithStreamingResponse: - return AsyncJobsResourceWithStreamingResponse(self._fine_tuning.jobs) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py deleted file mode 100644 index 90e643d7..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
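The `fine_tuning.py` module removed above also shows how the `*WithRawResponse` and `*WithStreamingResponse` wrappers recurse through the resource tree, so header-level access composes the same way as ordinary calls. A sketch under stated assumptions: the client class name and the `parse()` accessor follow the usual Stainless convention and are not confirmed by this diff.

```python
# `.with_raw_response` can be applied at any level of the tree above and the
# sub-resources keep working. Client class name and `.parse()` are assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

raw = client.fine_tuning.with_raw_response.jobs.retrieve("ftjob-abc123")  # hypothetical job ID
print(raw.headers)  # raw HTTP data, per the README section linked in the docstrings above
job = raw.parse()   # assumed accessor returning the parsed FineTuningJob
```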
- -from .jobs import ( - JobsResource, - AsyncJobsResource, - JobsResourceWithRawResponse, - AsyncJobsResourceWithRawResponse, - JobsResourceWithStreamingResponse, - AsyncJobsResourceWithStreamingResponse, -) -from .events import ( - EventsResource, - AsyncEventsResource, - EventsResourceWithRawResponse, - AsyncEventsResourceWithRawResponse, - EventsResourceWithStreamingResponse, - AsyncEventsResourceWithStreamingResponse, -) -from .checkpoints import ( - CheckpointsResource, - AsyncCheckpointsResource, - CheckpointsResourceWithRawResponse, - AsyncCheckpointsResourceWithRawResponse, - CheckpointsResourceWithStreamingResponse, - AsyncCheckpointsResourceWithStreamingResponse, -) - -__all__ = [ - "CheckpointsResource", - "AsyncCheckpointsResource", - "CheckpointsResourceWithRawResponse", - "AsyncCheckpointsResourceWithRawResponse", - "CheckpointsResourceWithStreamingResponse", - "AsyncCheckpointsResourceWithStreamingResponse", - "EventsResource", - "AsyncEventsResource", - "EventsResourceWithRawResponse", - "AsyncEventsResourceWithRawResponse", - "EventsResourceWithStreamingResponse", - "AsyncEventsResourceWithStreamingResponse", - "JobsResource", - "AsyncJobsResource", - "JobsResourceWithRawResponse", - "AsyncJobsResourceWithRawResponse", - "JobsResourceWithStreamingResponse", - "AsyncJobsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py deleted file mode 100644 index adac27a5..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/checkpoints.py +++ /dev/null @@ -1,197 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.fine_tuning.jobs import checkpoint_retrieve_params -from ....types.fine_tuning.jobs.checkpoint_retrieve_response import CheckpointRetrieveResponse - -__all__ = ["CheckpointsResource", "AsyncCheckpointsResource"] - - -class CheckpointsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> CheckpointsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return CheckpointsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> CheckpointsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return CheckpointsResourceWithStreamingResponse(self) - - def retrieve( - self, - fine_tuning_job_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CheckpointRetrieveResponse: - """ - List checkpoints for a fine-tuning job. - - Args: - after: Identifier for the last checkpoint ID from the previous pagination request. - - limit: Number of checkpoints to retrieve. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - checkpoint_retrieve_params.CheckpointRetrieveParams, - ), - ), - cast_to=CheckpointRetrieveResponse, - ) - - -class AsyncCheckpointsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncCheckpointsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncCheckpointsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncCheckpointsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncCheckpointsResourceWithStreamingResponse(self) - - async def retrieve( - self, - fine_tuning_job_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CheckpointRetrieveResponse: - """ - List checkpoints for a fine-tuning job. - - Args: - after: Identifier for the last checkpoint ID from the previous pagination request. - - limit: Number of checkpoints to retrieve. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return await self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - checkpoint_retrieve_params.CheckpointRetrieveParams, - ), - ), - cast_to=CheckpointRetrieveResponse, - ) - - -class CheckpointsResourceWithRawResponse: - def __init__(self, checkpoints: CheckpointsResource) -> None: - self._checkpoints = checkpoints - - self.retrieve = to_raw_response_wrapper( - checkpoints.retrieve, - ) - - -class AsyncCheckpointsResourceWithRawResponse: - def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: - self._checkpoints = checkpoints - - self.retrieve = async_to_raw_response_wrapper( - checkpoints.retrieve, - ) - - -class CheckpointsResourceWithStreamingResponse: - def __init__(self, checkpoints: CheckpointsResource) -> None: - self._checkpoints = checkpoints - - self.retrieve = to_streamed_response_wrapper( - checkpoints.retrieve, - ) - - -class AsyncCheckpointsResourceWithStreamingResponse: - def __init__(self, checkpoints: AsyncCheckpointsResource) -> None: - self._checkpoints = checkpoints - - self.retrieve = async_to_streamed_response_wrapper( - checkpoints.retrieve, - ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py deleted file mode 100644 index 56d64766..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/events.py +++ /dev/null @@ -1,197 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.fine_tuning.jobs import event_retrieve_params -from ....types.fine_tuning.jobs.event_retrieve_response import EventRetrieveResponse - -__all__ = ["EventsResource", "AsyncEventsResource"] - - -class EventsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> EventsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return EventsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> EventsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return EventsResourceWithStreamingResponse(self) - - def retrieve( - self, - fine_tuning_job_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EventRetrieveResponse: - """ - Get status updates for a fine-tuning job. - - Args: - after: Identifier for the last event from the previous pagination request. - - limit: Number of events to retrieve. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}/events", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - event_retrieve_params.EventRetrieveParams, - ), - ), - cast_to=EventRetrieveResponse, - ) - - -class AsyncEventsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncEventsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncEventsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncEventsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncEventsResourceWithStreamingResponse(self) - - async def retrieve( - self, - fine_tuning_job_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EventRetrieveResponse: - """ - Get status updates for a fine-tuning job. - - Args: - after: Identifier for the last event from the previous pagination request. - - limit: Number of events to retrieve. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return await self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}/events", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - event_retrieve_params.EventRetrieveParams, - ), - ), - cast_to=EventRetrieveResponse, - ) - - -class EventsResourceWithRawResponse: - def __init__(self, events: EventsResource) -> None: - self._events = events - - self.retrieve = to_raw_response_wrapper( - events.retrieve, - ) - - -class AsyncEventsResourceWithRawResponse: - def __init__(self, events: AsyncEventsResource) -> None: - self._events = events - - self.retrieve = async_to_raw_response_wrapper( - events.retrieve, - ) - - -class EventsResourceWithStreamingResponse: - def __init__(self, events: EventsResource) -> None: - self._events = events - - self.retrieve = to_streamed_response_wrapper( - events.retrieve, - ) - - -class AsyncEventsResourceWithStreamingResponse: - def __init__(self, events: AsyncEventsResource) -> None: - self._events = events - - self.retrieve = async_to_streamed_response_wrapper( - events.retrieve, - ) diff --git a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py b/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py deleted file mode 100644 index 09670aa9..00000000 --- a/src/digitalocean_genai_sdk/resources/fine_tuning/jobs/jobs.py +++ /dev/null @@ -1,668 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
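The events and checkpoints sub-resources deleted above share the same `after`/`limit` cursor scheme. A pagination sketch; the client class name and the `.data`/`.id` fields on the response models are assumptions, while the method signatures come from the code above.

```python
# Cursor pagination over fine-tuning job events; checkpoints.retrieve() takes
# the same parameters. `.data` and `.id` below are assumed response fields.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
job_id = "ftjob-abc123"  # hypothetical job ID

page = client.fine_tuning.jobs.events.retrieve(job_id, limit=20)
while page.data:  # assumed list field on EventRetrieveResponse
    for event in page.data:
        print(event)
    # `after` takes the identifier of the last event seen to fetch the next page.
    page = client.fine_tuning.jobs.events.retrieve(job_id, after=page.data[-1].id, limit=20)
```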
- -from __future__ import annotations - -from typing import Dict, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from .events import ( - EventsResource, - AsyncEventsResource, - EventsResourceWithRawResponse, - AsyncEventsResourceWithRawResponse, - EventsResourceWithStreamingResponse, - AsyncEventsResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from .checkpoints import ( - CheckpointsResource, - AsyncCheckpointsResource, - CheckpointsResourceWithRawResponse, - AsyncCheckpointsResourceWithRawResponse, - CheckpointsResourceWithStreamingResponse, - AsyncCheckpointsResourceWithStreamingResponse, -) -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.fine_tuning import job_list_params, job_create_params -from ....types.fine_tuning.fine_tuning_job import FineTuningJob -from ....types.fine_tuning.job_list_response import JobListResponse -from ....types.fine_tuning.fine_tune_method_param import FineTuneMethodParam - -__all__ = ["JobsResource", "AsyncJobsResource"] - - -class JobsResource(SyncAPIResource): - @cached_property - def checkpoints(self) -> CheckpointsResource: - return CheckpointsResource(self._client) - - @cached_property - def events(self) -> EventsResource: - return EventsResource(self._client) - - @cached_property - def with_raw_response(self) -> JobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return JobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> JobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return JobsResourceWithStreamingResponse(self) - - def create( - self, - *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], - training_file: str, - hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, - integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - method: FineTuneMethodParam | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - validation_file: Optional[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Creates a fine-tuning job which begins the process of creating a new model from - a given dataset. 
- - Response includes details of the enqueued job including job status and the name - of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - - Args: - model: The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - - training_file: The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload - your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the - [chat](/docs/api-reference/fine-tuning/chat-input), - [completions](/docs/api-reference/fine-tuning/completions-input) format, or if - the fine-tuning method uses the - [preference](/docs/api-reference/fine-tuning/preference-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - - hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated - in favor of `method`, and should be passed in under the `method` parameter. - - integrations: A list of integrations to enable for your fine-tuning job. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - method: The method used for fine-tuning. - - seed: The seed controls the reproducibility of the job. Passing in the same seed and - job parameters should produce the same results, but may differ in rare cases. If - a seed is not specified, one will be generated for you. - - suffix: A string of up to 64 characters that will be added to your fine-tuned model - name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. - - validation_file: The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics - periodically during fine-tuning. These metrics can be viewed in the fine-tuning - results file. The same data should not be present in both train and validation - files. - - Your dataset must be formatted as a JSONL file. You must upload your file with - the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/fine_tuning/jobs", - body=maybe_transform( - { - "model": model, - "training_file": training_file, - "hyperparameters": hyperparameters, - "integrations": integrations, - "metadata": metadata, - "method": method, - "seed": seed, - "suffix": suffix, - "validation_file": validation_file, - }, - job_create_params.JobCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - def retrieve( - self, - fine_tuning_job_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Get info about a fine-tuning job. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> JobListResponse: - """ - List your organization's fine-tuning jobs - - Args: - after: Identifier for the last job from the previous pagination request. - - limit: Number of fine-tuning jobs to retrieve. - - metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - Alternatively, set `metadata=null` to indicate no metadata. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/fine_tuning/jobs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "metadata": metadata, - }, - job_list_params.JobListParams, - ), - ), - cast_to=JobListResponse, - ) - - def cancel( - self, - fine_tuning_job_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Immediately cancel a fine-tune job. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._post( - f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - -class AsyncJobsResource(AsyncAPIResource): - @cached_property - def checkpoints(self) -> AsyncCheckpointsResource: - return AsyncCheckpointsResource(self._client) - - @cached_property - def events(self) -> AsyncEventsResource: - return AsyncEventsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncJobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncJobsResourceWithStreamingResponse(self) - - async def create( - self, - *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], - training_file: str, - hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, - integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - method: FineTuneMethodParam | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - validation_file: Optional[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Creates a fine-tuning job which begins the process of creating a new model from - a given dataset. - - Response includes details of the enqueued job including job status and the name - of the fine-tuned models once complete. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - - Args: - model: The name of the model to fine-tune. You can select one of the - [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - - training_file: The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload - your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the - [chat](/docs/api-reference/fine-tuning/chat-input), - [completions](/docs/api-reference/fine-tuning/completions-input) format, or if - the fine-tuning method uses the - [preference](/docs/api-reference/fine-tuning/preference-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - - hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated - in favor of `method`, and should be passed in under the `method` parameter. - - integrations: A list of integrations to enable for your fine-tuning job. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - method: The method used for fine-tuning. - - seed: The seed controls the reproducibility of the job. Passing in the same seed and - job parameters should produce the same results, but may differ in rare cases. If - a seed is not specified, one will be generated for you. - - suffix: A string of up to 64 characters that will be added to your fine-tuned model - name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. - - validation_file: The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics - periodically during fine-tuning. These metrics can be viewed in the fine-tuning - results file. The same data should not be present in both train and validation - files. - - Your dataset must be formatted as a JSONL file. You must upload your file with - the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/fine_tuning/jobs", - body=await async_maybe_transform( - { - "model": model, - "training_file": training_file, - "hyperparameters": hyperparameters, - "integrations": integrations, - "metadata": metadata, - "method": method, - "seed": seed, - "suffix": suffix, - "validation_file": validation_file, - }, - job_create_params.JobCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - async def retrieve( - self, - fine_tuning_job_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Get info about a fine-tuning job. - - [Learn more about fine-tuning](/docs/guides/fine-tuning) - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return await self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> JobListResponse: - """ - List your organization's fine-tuning jobs - - Args: - after: Identifier for the last job from the previous pagination request. - - limit: Number of fine-tuning jobs to retrieve. - - metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - Alternatively, set `metadata=null` to indicate no metadata. 
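# A sketch of the `list` and `cancel` calls documented above, plus the
# `.with_raw_response` prefix described in the class docstrings (client as
# constructed in the previous sketch; the header name is illustrative):
jobs = client.fine_tuning.jobs.list(limit=10, metadata={"team": "ml"})  # metadata[k]=v filter

client.fine_tuning.jobs.cancel("ftjob-abc123")  # immediately cancels the job

raw = client.fine_tuning.jobs.with_raw_response.list()
print(raw.headers.get("x-request-id"))  # the raw response exposes HTTP headers
parsed = raw.parse()  # then parse into the usual JobListResponse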
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/fine_tuning/jobs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "metadata": metadata, - }, - job_list_params.JobListParams, - ), - ), - cast_to=JobListResponse, - ) - - async def cancel( - self, - fine_tuning_job_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - """ - Immediately cancel a fine-tune job. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return await self._post( - f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FineTuningJob, - ) - - -class JobsResourceWithRawResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.create = to_raw_response_wrapper( - jobs.create, - ) - self.retrieve = to_raw_response_wrapper( - jobs.retrieve, - ) - self.list = to_raw_response_wrapper( - jobs.list, - ) - self.cancel = to_raw_response_wrapper( - jobs.cancel, - ) - - @cached_property - def checkpoints(self) -> CheckpointsResourceWithRawResponse: - return CheckpointsResourceWithRawResponse(self._jobs.checkpoints) - - @cached_property - def events(self) -> EventsResourceWithRawResponse: - return EventsResourceWithRawResponse(self._jobs.events) - - -class AsyncJobsResourceWithRawResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.create = async_to_raw_response_wrapper( - jobs.create, - ) - self.retrieve = async_to_raw_response_wrapper( - jobs.retrieve, - ) - self.list = async_to_raw_response_wrapper( - jobs.list, - ) - self.cancel = async_to_raw_response_wrapper( - jobs.cancel, - ) - - @cached_property - def checkpoints(self) -> AsyncCheckpointsResourceWithRawResponse: - return AsyncCheckpointsResourceWithRawResponse(self._jobs.checkpoints) - - @cached_property - def events(self) -> AsyncEventsResourceWithRawResponse: - return AsyncEventsResourceWithRawResponse(self._jobs.events) - - -class JobsResourceWithStreamingResponse: - def __init__(self, jobs: JobsResource) -> None: - self._jobs = jobs - - self.create = to_streamed_response_wrapper( - jobs.create, - ) - self.retrieve = to_streamed_response_wrapper( - jobs.retrieve, - ) - self.list = to_streamed_response_wrapper( - jobs.list, - ) - self.cancel = to_streamed_response_wrapper( - jobs.cancel, - ) - - @cached_property - def checkpoints(self) -> 
CheckpointsResourceWithStreamingResponse: - return CheckpointsResourceWithStreamingResponse(self._jobs.checkpoints) - - @cached_property - def events(self) -> EventsResourceWithStreamingResponse: - return EventsResourceWithStreamingResponse(self._jobs.events) - - -class AsyncJobsResourceWithStreamingResponse: - def __init__(self, jobs: AsyncJobsResource) -> None: - self._jobs = jobs - - self.create = async_to_streamed_response_wrapper( - jobs.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - jobs.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - jobs.list, - ) - self.cancel = async_to_streamed_response_wrapper( - jobs.cancel, - ) - - @cached_property - def checkpoints(self) -> AsyncCheckpointsResourceWithStreamingResponse: - return AsyncCheckpointsResourceWithStreamingResponse(self._jobs.checkpoints) - - @cached_property - def events(self) -> AsyncEventsResourceWithStreamingResponse: - return AsyncEventsResourceWithStreamingResponse(self._jobs.events) diff --git a/src/digitalocean_genai_sdk/resources/images.py b/src/digitalocean_genai_sdk/resources/images.py deleted file mode 100644 index 6003c71b..00000000 --- a/src/digitalocean_genai_sdk/resources/images.py +++ /dev/null @@ -1,592 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Mapping, Optional, cast -from typing_extensions import Literal - -import httpx - -from ..types import image_create_edit_params, image_create_variation_params, image_create_generation_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.images_response import ImagesResponse - -__all__ = ["ImagesResource", "AsyncImagesResource"] - - -class ImagesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ImagesResourceWithStreamingResponse(self) - - def create_edit( - self, - *, - image: FileTypes, - prompt: str, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates an edited or extended image given an original image and a prompt. - - Args: - image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - is not provided, image must have transparency, which will be used as the mask. - - prompt: A text description of the desired image(s). The maximum length is 1000 - characters. - - mask: An additional image whose fully transparent areas (e.g. where alpha is zero) - indicate where `image` should be edited. Must be a valid PNG file, less than - 4MB, and have the same dimensions as `image`. - - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. - - n: The number of images to generate. Must be between 1 and 10. - - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "image": image, - "prompt": prompt, - "mask": mask, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/images/edits", - body=maybe_transform(body, image_create_edit_params.ImageCreateEditParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - def create_generation( - self, - *, - prompt: str, - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates an image given a prompt. - - Args: - prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2` and 4000 characters for `dall-e-3`. - - model: The model to use for image generation. - - n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - `n=1` is supported. - - quality: The quality of the image that will be generated. `hd` creates images with finer - details and greater consistency across the image. This param is only supported - for `dall-e-3`. - - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - `1024x1792` for `dall-e-3` models. - - style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid - causes the model to lean towards generating hyper-real and dramatic images. - Natural causes the model to produce more natural, less hyper-real looking - images. This param is only supported for `dall-e-3`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/images/generations", - body=maybe_transform( - { - "prompt": prompt, - "model": model, - "n": n, - "quality": quality, - "response_format": response_format, - "size": size, - "style": style, - "user": user, - }, - image_create_generation_params.ImageCreateGenerationParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - def create_variation( - self, - *, - image: FileTypes, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates a variation of a given image. - - Args: - image: The image to use as the basis for the variation(s). Must be a valid PNG file, - less than 4MB, and square. - - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. - - n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - `n=1` is supported. 
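# A minimal sketch of `create_generation` with the parameters documented
# above (client class name assumed, as in the fine-tuning sketches):
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed class name

client = DigitaloceanGenaiSDK()

resp = client.images.create_generation(
    prompt="A watercolor lighthouse at dawn",
    model="dall-e-3",
    n=1,                    # dall-e-3 supports only n=1
    quality="hd",           # finer detail; dall-e-3 only
    size="1792x1024",
    style="natural",
    response_format="url",  # URLs expire 60 minutes after generation
)
print(resp)  # ImagesResponse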
- - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "image": image, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/images/variations", - body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - -class AsyncImagesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncImagesResourceWithStreamingResponse(self) - - async def create_edit( - self, - *, - image: FileTypes, - prompt: str, - mask: FileTypes | NotGiven = NOT_GIVEN, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates an edited or extended image given an original image and a prompt. - - Args: - image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - is not provided, image must have transparency, which will be used as the mask. - - prompt: A text description of the desired image(s). The maximum length is 1000 - characters. 
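# A sketch of `create_variation` from a local PNG (client as in the sketch
# above). `image` accepts `FileTypes` such as an open binary file; as the
# body-construction comments note, the request is sent as
# multipart/form-data with a boundary parameter added automatically:
with open("original.png", "rb") as f:  # square PNG under 4MB
    variations = client.images.create_variation(
        image=f,
        n=2,
        size="512x512",
        response_format="b64_json",
    )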
- - mask: An additional image whose fully transparent areas (e.g. where alpha is zero) - indicate where `image` should be edited. Must be a valid PNG file, less than - 4MB, and have the same dimensions as `image`. - - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. - - n: The number of images to generate. Must be between 1 and 10. - - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "image": image, - "prompt": prompt, - "mask": mask, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/images/edits", - body=await async_maybe_transform(body, image_create_edit_params.ImageCreateEditParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - async def create_generation( - self, - *, - prompt: str, - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates an image given a prompt. - - Args: - prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2` and 4000 characters for `dall-e-3`. - - model: The model to use for image generation. - - n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - `n=1` is supported. - - quality: The quality of the image that will be generated. `hd` creates images with finer - details and greater consistency across the image. This param is only supported - for `dall-e-3`. - - response_format: The format in which the generated images are returned. 
Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - `1024x1792` for `dall-e-3` models. - - style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid - causes the model to lean towards generating hyper-real and dramatic images. - Natural causes the model to produce more natural, less hyper-real looking - images. This param is only supported for `dall-e-3`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/images/generations", - body=await async_maybe_transform( - { - "prompt": prompt, - "model": model, - "n": n, - "quality": quality, - "response_format": response_format, - "size": size, - "style": style, - "user": user, - }, - image_create_generation_params.ImageCreateGenerationParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - async def create_variation( - self, - *, - image: FileTypes, - model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates a variation of a given image. - - Args: - image: The image to use as the basis for the variation(s). Must be a valid PNG file, - less than 4MB, and square. - - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. - - n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - `n=1` is supported. - - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
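# The async resource mirrors the sync one; a sketch with asyncio, assuming
# an `AsyncDigitaloceanGenaiSDK` client class (name not shown in this diff):
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # assumed class name


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    resp = await client.images.create_generation(prompt="A red bicycle", n=1)
    print(resp)


asyncio.run(main())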
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "image": image, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/images/variations", - body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - -class ImagesResourceWithRawResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - self.create_edit = to_raw_response_wrapper( - images.create_edit, - ) - self.create_generation = to_raw_response_wrapper( - images.create_generation, - ) - self.create_variation = to_raw_response_wrapper( - images.create_variation, - ) - - -class AsyncImagesResourceWithRawResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - self.create_edit = async_to_raw_response_wrapper( - images.create_edit, - ) - self.create_generation = async_to_raw_response_wrapper( - images.create_generation, - ) - self.create_variation = async_to_raw_response_wrapper( - images.create_variation, - ) - - -class ImagesResourceWithStreamingResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - self.create_edit = to_streamed_response_wrapper( - images.create_edit, - ) - self.create_generation = to_streamed_response_wrapper( - images.create_generation, - ) - self.create_variation = to_streamed_response_wrapper( - images.create_variation, - ) - - -class AsyncImagesResourceWithStreamingResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - self.create_edit = async_to_streamed_response_wrapper( - images.create_edit, - ) - self.create_generation = async_to_streamed_response_wrapper( - images.create_generation, - ) - self.create_variation = async_to_streamed_response_wrapper( - images.create_variation, - ) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py index 9bdebc56..81b75441 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/digitalocean_genai_sdk/resources/models.py @@ -16,7 +16,6 @@ from ..types.model import Model from .._base_client import make_request_options from ..types.model_list_response import ModelListResponse -from ..types.model_delete_response import ModelDeleteResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -97,41 +96,6 @@ def list( cast_to=ModelListResponse, ) - def delete( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelDeleteResponse: - """Delete a fine-tuned model. - - You must have the Owner role in your organization to - delete a model. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return self._delete( - f"/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelDeleteResponse, - ) - class AsyncModelsResource(AsyncAPIResource): @cached_property @@ -209,41 +173,6 @@ async def list( cast_to=ModelListResponse, ) - async def delete( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelDeleteResponse: - """Delete a fine-tuned model. - - You must have the Owner role in your organization to - delete a model. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return await self._delete( - f"/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelDeleteResponse, - ) - class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: @@ -255,9 +184,6 @@ def __init__(self, models: ModelsResource) -> None: self.list = to_raw_response_wrapper( models.list, ) - self.delete = to_raw_response_wrapper( - models.delete, - ) class AsyncModelsResourceWithRawResponse: @@ -270,9 +196,6 @@ def __init__(self, models: AsyncModelsResource) -> None: self.list = async_to_raw_response_wrapper( models.list, ) - self.delete = async_to_raw_response_wrapper( - models.delete, - ) class ModelsResourceWithStreamingResponse: @@ -285,9 +208,6 @@ def __init__(self, models: ModelsResource) -> None: self.list = to_streamed_response_wrapper( models.list, ) - self.delete = to_streamed_response_wrapper( - models.delete, - ) class AsyncModelsResourceWithStreamingResponse: @@ -300,6 +220,3 @@ def __init__(self, models: AsyncModelsResource) -> None: self.list = async_to_streamed_response_wrapper( models.list, ) - self.delete = async_to_streamed_response_wrapper( - models.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/moderations.py b/src/digitalocean_genai_sdk/resources/moderations.py deleted file mode 100644 index 5979cc29..00000000 --- a/src/digitalocean_genai_sdk/resources/moderations.py +++ /dev/null @@ -1,216 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
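# After the models.py change above, the models resource exposes `retrieve`
# and `list` only (the fine-tuned-model `delete` method is removed); a
# minimal sketch of what remains, client class name assumed as before:
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed class name

client = DigitaloceanGenaiSDK()

models = client.models.list()                  # ModelListResponse
model = client.models.retrieve("gpt-4o-mini")  # model ID is illustrative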
- -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal - -import httpx - -from ..types import moderation_classify_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.moderation_classify_response import ModerationClassifyResponse - -__all__ = ["ModerationsResource", "AsyncModerationsResource"] - - -class ModerationsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ModerationsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ModerationsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ModerationsResourceWithStreamingResponse(self) - - def classify( - self, - *, - input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]], - model: Union[ - str, - Literal[ - "omni-moderation-latest", - "omni-moderation-2024-09-26", - "text-moderation-latest", - "text-moderation-stable", - ], - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModerationClassifyResponse: - """Classifies if text and/or image inputs are potentially harmful. - - Learn more in - the [moderation guide](/docs/guides/moderation). - - Args: - input: Input (or inputs) to classify. Can be a single string, an array of strings, or - an array of multi-modal input objects similar to other models. - - model: The content moderation model you would like to use. Learn more in - [the moderation guide](/docs/guides/moderation), and learn about available - models [here](/docs/models#moderation). 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/moderations", - body=maybe_transform( - { - "input": input, - "model": model, - }, - moderation_classify_params.ModerationClassifyParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModerationClassifyResponse, - ) - - -class AsyncModerationsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncModerationsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncModerationsResourceWithStreamingResponse(self) - - async def classify( - self, - *, - input: Union[str, List[str], Iterable[moderation_classify_params.InputUnionMember2]], - model: Union[ - str, - Literal[ - "omni-moderation-latest", - "omni-moderation-2024-09-26", - "text-moderation-latest", - "text-moderation-stable", - ], - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModerationClassifyResponse: - """Classifies if text and/or image inputs are potentially harmful. - - Learn more in - the [moderation guide](/docs/guides/moderation). - - Args: - input: Input (or inputs) to classify. Can be a single string, an array of strings, or - an array of multi-modal input objects similar to other models. - - model: The content moderation model you would like to use. Learn more in - [the moderation guide](/docs/guides/moderation), and learn about available - models [here](/docs/models#moderation). 
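# A minimal sketch of `classify` with the parameters documented above
# (client class name assumed, as in the earlier sketches):
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed class name

client = DigitaloceanGenaiSDK()

result = client.moderations.classify(
    input="I want to hurt them.",    # a string, a list of strings, or
    model="omni-moderation-latest",  # multi-modal input objects
)
print(result)  # ModerationClassifyResponse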
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/moderations", - body=await async_maybe_transform( - { - "input": input, - "model": model, - }, - moderation_classify_params.ModerationClassifyParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModerationClassifyResponse, - ) - - -class ModerationsResourceWithRawResponse: - def __init__(self, moderations: ModerationsResource) -> None: - self._moderations = moderations - - self.classify = to_raw_response_wrapper( - moderations.classify, - ) - - -class AsyncModerationsResourceWithRawResponse: - def __init__(self, moderations: AsyncModerationsResource) -> None: - self._moderations = moderations - - self.classify = async_to_raw_response_wrapper( - moderations.classify, - ) - - -class ModerationsResourceWithStreamingResponse: - def __init__(self, moderations: ModerationsResource) -> None: - self._moderations = moderations - - self.classify = to_streamed_response_wrapper( - moderations.classify, - ) - - -class AsyncModerationsResourceWithStreamingResponse: - def __init__(self, moderations: AsyncModerationsResource) -> None: - self._moderations = moderations - - self.classify = async_to_streamed_response_wrapper( - moderations.classify, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/__init__.py b/src/digitalocean_genai_sdk/resources/organization/__init__.py deleted file mode 100644 index cf206d71..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .usage import ( - UsageResource, - AsyncUsageResource, - UsageResourceWithRawResponse, - AsyncUsageResourceWithRawResponse, - UsageResourceWithStreamingResponse, - AsyncUsageResourceWithStreamingResponse, -) -from .users import ( - UsersResource, - AsyncUsersResource, - UsersResourceWithRawResponse, - AsyncUsersResourceWithRawResponse, - UsersResourceWithStreamingResponse, - AsyncUsersResourceWithStreamingResponse, -) -from .invites import ( - InvitesResource, - AsyncInvitesResource, - InvitesResourceWithRawResponse, - AsyncInvitesResourceWithRawResponse, - InvitesResourceWithStreamingResponse, - AsyncInvitesResourceWithStreamingResponse, -) -from .projects import ( - ProjectsResource, - AsyncProjectsResource, - ProjectsResourceWithRawResponse, - AsyncProjectsResourceWithRawResponse, - ProjectsResourceWithStreamingResponse, - AsyncProjectsResourceWithStreamingResponse, -) -from .organization import ( - OrganizationResource, - AsyncOrganizationResource, - OrganizationResourceWithRawResponse, - AsyncOrganizationResourceWithRawResponse, - OrganizationResourceWithStreamingResponse, - AsyncOrganizationResourceWithStreamingResponse, -) -from .admin_api_keys import ( - AdminAPIKeysResource, - AsyncAdminAPIKeysResource, - AdminAPIKeysResourceWithRawResponse, - AsyncAdminAPIKeysResourceWithRawResponse, - AdminAPIKeysResourceWithStreamingResponse, - AsyncAdminAPIKeysResourceWithStreamingResponse, -) - -__all__ = [ - "AdminAPIKeysResource", - "AsyncAdminAPIKeysResource", - "AdminAPIKeysResourceWithRawResponse", - "AsyncAdminAPIKeysResourceWithRawResponse", - "AdminAPIKeysResourceWithStreamingResponse", - "AsyncAdminAPIKeysResourceWithStreamingResponse", - "InvitesResource", - "AsyncInvitesResource", - "InvitesResourceWithRawResponse", - "AsyncInvitesResourceWithRawResponse", - "InvitesResourceWithStreamingResponse", - "AsyncInvitesResourceWithStreamingResponse", - "ProjectsResource", - "AsyncProjectsResource", - "ProjectsResourceWithRawResponse", - "AsyncProjectsResourceWithRawResponse", - "ProjectsResourceWithStreamingResponse", - "AsyncProjectsResourceWithStreamingResponse", - "UsageResource", - "AsyncUsageResource", - "UsageResourceWithRawResponse", - "AsyncUsageResourceWithRawResponse", - "UsageResourceWithStreamingResponse", - "AsyncUsageResourceWithStreamingResponse", - "UsersResource", - "AsyncUsersResource", - "UsersResourceWithRawResponse", - "AsyncUsersResourceWithRawResponse", - "UsersResourceWithStreamingResponse", - "AsyncUsersResourceWithStreamingResponse", - "OrganizationResource", - "AsyncOrganizationResource", - "OrganizationResourceWithRawResponse", - "AsyncOrganizationResourceWithRawResponse", - "OrganizationResourceWithStreamingResponse", - "AsyncOrganizationResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py b/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py deleted file mode 100644 index d98c5fb7..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/admin_api_keys.py +++ /dev/null @@ -1,444 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.organization import admin_api_key_list_params, admin_api_key_create_params -from ...types.organization.admin_api_key import AdminAPIKey -from ...types.organization.admin_api_key_list_response import AdminAPIKeyListResponse -from ...types.organization.admin_api_key_delete_response import AdminAPIKeyDeleteResponse - -__all__ = ["AdminAPIKeysResource", "AsyncAdminAPIKeysResource"] - - -class AdminAPIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> AdminAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AdminAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AdminAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AdminAPIKeysResourceWithStreamingResponse(self) - - def create( - self, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKey: - """ - Create a new admin-level API key for the organization. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/organization/admin_api_keys", - body=maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKey, - ) - - def retrieve( - self, - key_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKey: - """ - Get details for a specific organization API key by its ID. - - Args: - key_id: The ID of the API key. 
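# A minimal sketch of `create` and `retrieve` as documented above; the
# client class name and the `id` field on `AdminAPIKey` are assumptions:
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed class name

client = DigitaloceanGenaiSDK()

key = client.organization.admin_api_keys.create(name="ci-runner")
fetched = client.organization.admin_api_keys.retrieve(key.id)  # `id` assumed on the model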
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return self._get( - f"/organization/admin_api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKey, - ) - - def list( - self, - *, - after: Optional[str] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKeyListResponse: - """ - Retrieve a paginated list of organization admin API keys. - - Args: - after: Return keys with IDs that come after this ID in the pagination order. - - limit: Maximum number of keys to return. - - order: Order results by creation time, ascending or descending. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/admin_api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - }, - admin_api_key_list_params.AdminAPIKeyListParams, - ), - ), - cast_to=AdminAPIKeyListResponse, - ) - - def delete( - self, - key_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKeyDeleteResponse: - """ - Delete the specified admin API key. - - Args: - key_id: The ID of the API key to be deleted. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return self._delete( - f"/organization/admin_api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKeyDeleteResponse, - ) - - -class AsyncAdminAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAdminAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAdminAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAdminAPIKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKey: - """ - Create a new admin-level API key for the organization. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/organization/admin_api_keys", - body=await async_maybe_transform({"name": name}, admin_api_key_create_params.AdminAPIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKey, - ) - - async def retrieve( - self, - key_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKey: - """ - Get details for a specific organization API key by its ID. - - Args: - key_id: The ID of the API key. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return await self._get( - f"/organization/admin_api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKey, - ) - - async def list( - self, - *, - after: Optional[str] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKeyListResponse: - """ - Retrieve a paginated list of organization admin API keys. 
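# A sketch of cursor pagination with `after`/`limit`/`order`, then deletion,
# per the parameters documented above (client as in the previous sketch;
# the key ID is illustrative):
page = client.organization.admin_api_keys.list(limit=20, order="desc")
next_page = client.organization.admin_api_keys.list(after="key_abc", limit=20)  # keys after `key_abc`

confirmation = client.organization.admin_api_keys.delete("key_abc")  # AdminAPIKeyDeleteResponse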
- - Args: - after: Return keys with IDs that come after this ID in the pagination order. - - limit: Maximum number of keys to return. - - order: Order results by creation time, ascending or descending. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/admin_api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - "order": order, - }, - admin_api_key_list_params.AdminAPIKeyListParams, - ), - ), - cast_to=AdminAPIKeyListResponse, - ) - - async def delete( - self, - key_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AdminAPIKeyDeleteResponse: - """ - Delete the specified admin API key. - - Args: - key_id: The ID of the API key to be deleted. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return await self._delete( - f"/organization/admin_api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AdminAPIKeyDeleteResponse, - ) - - -class AdminAPIKeysResourceWithRawResponse: - def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None: - self._admin_api_keys = admin_api_keys - - self.create = to_raw_response_wrapper( - admin_api_keys.create, - ) - self.retrieve = to_raw_response_wrapper( - admin_api_keys.retrieve, - ) - self.list = to_raw_response_wrapper( - admin_api_keys.list, - ) - self.delete = to_raw_response_wrapper( - admin_api_keys.delete, - ) - - -class AsyncAdminAPIKeysResourceWithRawResponse: - def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None: - self._admin_api_keys = admin_api_keys - - self.create = async_to_raw_response_wrapper( - admin_api_keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - admin_api_keys.retrieve, - ) - self.list = async_to_raw_response_wrapper( - admin_api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - admin_api_keys.delete, - ) - - -class AdminAPIKeysResourceWithStreamingResponse: - def __init__(self, admin_api_keys: AdminAPIKeysResource) -> None: - self._admin_api_keys = admin_api_keys - - self.create = to_streamed_response_wrapper( - admin_api_keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - admin_api_keys.retrieve, - ) - self.list = to_streamed_response_wrapper( - admin_api_keys.list, - ) - self.delete = to_streamed_response_wrapper( - admin_api_keys.delete, - ) - - -class AsyncAdminAPIKeysResourceWithStreamingResponse: - def __init__(self, admin_api_keys: AsyncAdminAPIKeysResource) -> None: - 
self._admin_api_keys = admin_api_keys - - self.create = async_to_streamed_response_wrapper( - admin_api_keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - admin_api_keys.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - admin_api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - admin_api_keys.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/invites.py b/src/digitalocean_genai_sdk/resources/organization/invites.py deleted file mode 100644 index 392d4308..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/invites.py +++ /dev/null @@ -1,476 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.organization import invite_list_params, invite_create_params -from ...types.organization.invite import Invite -from ...types.organization.invite_list_response import InviteListResponse -from ...types.organization.invite_delete_response import InviteDeleteResponse - -__all__ = ["InvitesResource", "AsyncInvitesResource"] - - -class InvitesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> InvitesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return InvitesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> InvitesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return InvitesResourceWithStreamingResponse(self) - - def create( - self, - *, - email: str, - role: Literal["reader", "owner"], - projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Invite: - """Create an invite for a user to the organization. - - The invite must be accepted by - the user before they have access to the organization. - - Args: - email: Send an email to this address - - role: `owner` or `reader` - - projects: An array of projects to which membership is granted at the same time the org - invite is accepted. If omitted, the user will be invited to the default project - for compatibility with legacy behavior. 
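For readers tracking what this patch removes: a minimal sketch of how the admin API key resource deleted above was typically invoked. The client class name `DigitaloceanGenaiSDK` is an assumption (the generated name lives in `_client.py` and is not shown in this diff), and the key name is illustrative.

from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed, not confirmed by this patch

client = DigitaloceanGenaiSDK()  # credentials are read from the environment by default

# Create an admin-level key, then page through existing keys (newest first).
created = client.organization.admin_api_keys.create(name="ci-deploy-key")
page = client.organization.admin_api_keys.list(limit=20, order="desc")

# Retrieve and delete a key by its ID.
key = client.organization.admin_api_keys.retrieve(created.id)
client.organization.admin_api_keys.delete(created.id)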
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/organization/invites", - body=maybe_transform( - { - "email": email, - "role": role, - "projects": projects, - }, - invite_create_params.InviteCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Invite, - ) - - def retrieve( - self, - invite_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Invite: - """ - Retrieves an invite. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not invite_id: - raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") - return self._get( - f"/organization/invites/{invite_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Invite, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> InviteListResponse: - """ - Returns a list of invites in the organization. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/invites", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - invite_list_params.InviteListParams, - ), - ), - cast_to=InviteListResponse, - ) - - def delete( - self, - invite_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> InviteDeleteResponse: - """Delete an invite. - - If the invite has already been accepted, it cannot be deleted. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not invite_id: - raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") - return self._delete( - f"/organization/invites/{invite_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=InviteDeleteResponse, - ) - - -class AsyncInvitesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncInvitesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncInvitesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncInvitesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncInvitesResourceWithStreamingResponse(self) - - async def create( - self, - *, - email: str, - role: Literal["reader", "owner"], - projects: Iterable[invite_create_params.Project] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Invite: - """Create an invite for a user to the organization. - - The invite must be accepted by - the user before they have access to the organization. - - Args: - email: Send an email to this address - - role: `owner` or `reader` - - projects: An array of projects to which membership is granted at the same time the org - invite is accepted. If omitted, the user will be invited to the default project - for compatibility with legacy behavior. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/organization/invites", - body=await async_maybe_transform( - { - "email": email, - "role": role, - "projects": projects, - }, - invite_create_params.InviteCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Invite, - ) - - async def retrieve( - self, - invite_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Invite: - """ - Retrieves an invite. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not invite_id: - raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") - return await self._get( - f"/organization/invites/{invite_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Invite, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> InviteListResponse: - """ - Returns a list of invites in the organization. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/invites", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - invite_list_params.InviteListParams, - ), - ), - cast_to=InviteListResponse, - ) - - async def delete( - self, - invite_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> InviteDeleteResponse: - """Delete an invite. - - If the invite has already been accepted, it cannot be deleted. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not invite_id: - raise ValueError(f"Expected a non-empty value for `invite_id` but received {invite_id!r}") - return await self._delete( - f"/organization/invites/{invite_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=InviteDeleteResponse, - ) - - -class InvitesResourceWithRawResponse: - def __init__(self, invites: InvitesResource) -> None: - self._invites = invites - - self.create = to_raw_response_wrapper( - invites.create, - ) - self.retrieve = to_raw_response_wrapper( - invites.retrieve, - ) - self.list = to_raw_response_wrapper( - invites.list, - ) - self.delete = to_raw_response_wrapper( - invites.delete, - ) - - -class AsyncInvitesResourceWithRawResponse: - def __init__(self, invites: AsyncInvitesResource) -> None: - self._invites = invites - - self.create = async_to_raw_response_wrapper( - invites.create, - ) - self.retrieve = async_to_raw_response_wrapper( - invites.retrieve, - ) - self.list = async_to_raw_response_wrapper( - invites.list, - ) - self.delete = async_to_raw_response_wrapper( - invites.delete, - ) - - -class InvitesResourceWithStreamingResponse: - def __init__(self, invites: InvitesResource) -> None: - self._invites = invites - - self.create = to_streamed_response_wrapper( - invites.create, - ) - self.retrieve = to_streamed_response_wrapper( - invites.retrieve, - ) - self.list = to_streamed_response_wrapper( - invites.list, - ) - self.delete = to_streamed_response_wrapper( - invites.delete, - ) - - -class AsyncInvitesResourceWithStreamingResponse: - def __init__(self, invites: AsyncInvitesResource) -> None: - self._invites = invites - - self.create = async_to_streamed_response_wrapper( - invites.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - invites.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - invites.list, - ) - self.delete = async_to_streamed_response_wrapper( - invites.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/organization.py b/src/digitalocean_genai_sdk/resources/organization/organization.py deleted file mode 100644 index abb1d7c8..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/organization.py +++ /dev/null @@ -1,586 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
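Likewise, a hedged sketch of the invites surface removed above, reusing the assumed `client` from the previous example; the email, role, and ID values are illustrative.

# Invite a user; the invite must be accepted before they gain access.
invite = client.organization.invites.create(
    email="new.user@example.com",
    role="reader",  # per the docstring: `owner` or `reader`
)
pending = client.organization.invites.list(limit=20)
client.organization.invites.retrieve(invite.id)
client.organization.invites.delete(invite.id)  # fails if the invite was already accepted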
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from .usage import ( - UsageResource, - AsyncUsageResource, - UsageResourceWithRawResponse, - AsyncUsageResourceWithRawResponse, - UsageResourceWithStreamingResponse, - AsyncUsageResourceWithStreamingResponse, -) -from .users import ( - UsersResource, - AsyncUsersResource, - UsersResourceWithRawResponse, - AsyncUsersResourceWithRawResponse, - UsersResourceWithStreamingResponse, - AsyncUsersResourceWithStreamingResponse, -) -from ...types import organization_get_costs_params, organization_list_audit_logs_params -from .invites import ( - InvitesResource, - AsyncInvitesResource, - InvitesResourceWithRawResponse, - AsyncInvitesResourceWithRawResponse, - InvitesResourceWithStreamingResponse, - AsyncInvitesResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from .admin_api_keys import ( - AdminAPIKeysResource, - AsyncAdminAPIKeysResource, - AdminAPIKeysResourceWithRawResponse, - AsyncAdminAPIKeysResourceWithRawResponse, - AdminAPIKeysResourceWithStreamingResponse, - AsyncAdminAPIKeysResourceWithStreamingResponse, -) -from .projects.projects import ( - ProjectsResource, - AsyncProjectsResource, - ProjectsResourceWithRawResponse, - AsyncProjectsResourceWithRawResponse, - ProjectsResourceWithStreamingResponse, - AsyncProjectsResourceWithStreamingResponse, -) -from ...types.usage_response import UsageResponse -from ...types.audit_log_event_type import AuditLogEventType -from ...types.organization_list_audit_logs_response import OrganizationListAuditLogsResponse - -__all__ = ["OrganizationResource", "AsyncOrganizationResource"] - - -class OrganizationResource(SyncAPIResource): - @cached_property - def admin_api_keys(self) -> AdminAPIKeysResource: - return AdminAPIKeysResource(self._client) - - @cached_property - def invites(self) -> InvitesResource: - return InvitesResource(self._client) - - @cached_property - def projects(self) -> ProjectsResource: - return ProjectsResource(self._client) - - @cached_property - def usage(self) -> UsageResource: - return UsageResource(self._client) - - @cached_property - def users(self) -> UsersResource: - return UsersResource(self._client) - - @cached_property - def with_raw_response(self) -> OrganizationResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return OrganizationResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> OrganizationResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return OrganizationResourceWithStreamingResponse(self) - - def get_costs( - self, - *, - start_time: int, - bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get cost details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently only `1d` is supported; defaults - to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the costs by the specified fields. Supported fields include `project_id`, - `line_item`, and any combination of them. - - limit: A limit on the number of buckets to be returned. Limit can range between 1 and - 180, and the default is 7. - - page: A cursor for use in pagination. Corresponds to the `next_page` field from the - previous response. - - project_ids: Return only costs for these projects. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/costs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - organization_get_costs_params.OrganizationGetCostsParams, - ), - ), - cast_to=UsageResponse, - ) - - def list_audit_logs( - self, - *, - actor_emails: List[str] | NotGiven = NOT_GIVEN, - actor_ids: List[str] | NotGiven = NOT_GIVEN, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN, - event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - resource_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationListAuditLogsResponse: - """ - List user actions and configuration changes within this organization. - - Args: - actor_emails: Return only events performed by users with these emails.
- - actor_ids: Return only events performed by these actors. Can be a user ID, a service - account ID, or an API key tracking ID. - - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - effective_at: Return only events whose `effective_at` (Unix seconds) is in this range. - - event_types: Return only events with a `type` in one of these values. For example, - `project.created`. For all options, see the documentation for the - [audit log object](/docs/api-reference/audit-logs/object). - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - project_ids: Return only events for these projects. - - resource_ids: Return only events performed on these targets. For example, a project ID - that was updated. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/audit_logs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "actor_emails": actor_emails, - "actor_ids": actor_ids, - "after": after, - "before": before, - "effective_at": effective_at, - "event_types": event_types, - "limit": limit, - "project_ids": project_ids, - "resource_ids": resource_ids, - }, - organization_list_audit_logs_params.OrganizationListAuditLogsParams, - ), - ), - cast_to=OrganizationListAuditLogsResponse, - ) - - -class AsyncOrganizationResource(AsyncAPIResource): - @cached_property - def admin_api_keys(self) -> AsyncAdminAPIKeysResource: - return AsyncAdminAPIKeysResource(self._client) - - @cached_property - def invites(self) -> AsyncInvitesResource: - return AsyncInvitesResource(self._client) - - @cached_property - def projects(self) -> AsyncProjectsResource: - return AsyncProjectsResource(self._client) - - @cached_property - def usage(self) -> AsyncUsageResource: - return AsyncUsageResource(self._client) - - @cached_property - def users(self) -> AsyncUsersResource: - return AsyncUsersResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncOrganizationResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncOrganizationResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncOrganizationResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncOrganizationResourceWithStreamingResponse(self) - - async def get_costs( - self, - *, - start_time: int, - bucket_width: Literal["1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "line_item"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get cost details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently only `1d` is supported; defaults - to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the costs by the specified fields. Supported fields include `project_id`, - `line_item`, and any combination of them. - - limit: A limit on the number of buckets to be returned. Limit can range between 1 and - 180, and the default is 7. - - page: A cursor for use in pagination. Corresponds to the `next_page` field from the - previous response. - - project_ids: Return only costs for these projects. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/costs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - organization_get_costs_params.OrganizationGetCostsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def list_audit_logs( - self, - *, - actor_emails: List[str] | NotGiven = NOT_GIVEN, - actor_ids: List[str] | NotGiven = NOT_GIVEN, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - effective_at: organization_list_audit_logs_params.EffectiveAt | NotGiven = NOT_GIVEN, - event_types: List[AuditLogEventType] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - resource_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationListAuditLogsResponse: - """ - List user actions and configuration changes within this organization. - - Args: - actor_emails: Return only events performed by users with these emails.
- - actor_ids: Return only events performed by these actors. Can be a user ID, a service - account ID, or an API key tracking ID. - - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - effective_at: Return only events whose `effective_at` (Unix seconds) is in this range. - - event_types: Return only events with a `type` in one of these values. For example, - `project.created`. For all options, see the documentation for the - [audit log object](/docs/api-reference/audit-logs/object). - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - project_ids: Return only events for these projects. - - resource_ids: Return only events performed on these targets. For example, a project ID - that was updated. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/audit_logs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "actor_emails": actor_emails, - "actor_ids": actor_ids, - "after": after, - "before": before, - "effective_at": effective_at, - "event_types": event_types, - "limit": limit, - "project_ids": project_ids, - "resource_ids": resource_ids, - }, - organization_list_audit_logs_params.OrganizationListAuditLogsParams, - ), - ), - cast_to=OrganizationListAuditLogsResponse, - ) - - -class OrganizationResourceWithRawResponse: - def __init__(self, organization: OrganizationResource) -> None: - self._organization = organization - - self.get_costs = to_raw_response_wrapper( - organization.get_costs, - ) - self.list_audit_logs = to_raw_response_wrapper( - organization.list_audit_logs, - ) - - @cached_property - def admin_api_keys(self) -> AdminAPIKeysResourceWithRawResponse: - return AdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys) - - @cached_property - def invites(self) -> InvitesResourceWithRawResponse: - return InvitesResourceWithRawResponse(self._organization.invites) - - @cached_property - def projects(self) -> ProjectsResourceWithRawResponse: - return ProjectsResourceWithRawResponse(self._organization.projects) - - @cached_property - def usage(self) -> UsageResourceWithRawResponse: - return UsageResourceWithRawResponse(self._organization.usage) - - @cached_property - def users(self) -> UsersResourceWithRawResponse: - return UsersResourceWithRawResponse(self._organization.users) - - -class AsyncOrganizationResourceWithRawResponse: - def __init__(self, organization: AsyncOrganizationResource) -> None: - self._organization = organization - - self.get_costs = async_to_raw_response_wrapper( - organization.get_costs, - ) - self.list_audit_logs = async_to_raw_response_wrapper( - organization.list_audit_logs, - ) - -
@cached_property - def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithRawResponse: - return AsyncAdminAPIKeysResourceWithRawResponse(self._organization.admin_api_keys) - - @cached_property - def invites(self) -> AsyncInvitesResourceWithRawResponse: - return AsyncInvitesResourceWithRawResponse(self._organization.invites) - - @cached_property - def projects(self) -> AsyncProjectsResourceWithRawResponse: - return AsyncProjectsResourceWithRawResponse(self._organization.projects) - - @cached_property - def usage(self) -> AsyncUsageResourceWithRawResponse: - return AsyncUsageResourceWithRawResponse(self._organization.usage) - - @cached_property - def users(self) -> AsyncUsersResourceWithRawResponse: - return AsyncUsersResourceWithRawResponse(self._organization.users) - - -class OrganizationResourceWithStreamingResponse: - def __init__(self, organization: OrganizationResource) -> None: - self._organization = organization - - self.get_costs = to_streamed_response_wrapper( - organization.get_costs, - ) - self.list_audit_logs = to_streamed_response_wrapper( - organization.list_audit_logs, - ) - - @cached_property - def admin_api_keys(self) -> AdminAPIKeysResourceWithStreamingResponse: - return AdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys) - - @cached_property - def invites(self) -> InvitesResourceWithStreamingResponse: - return InvitesResourceWithStreamingResponse(self._organization.invites) - - @cached_property - def projects(self) -> ProjectsResourceWithStreamingResponse: - return ProjectsResourceWithStreamingResponse(self._organization.projects) - - @cached_property - def usage(self) -> UsageResourceWithStreamingResponse: - return UsageResourceWithStreamingResponse(self._organization.usage) - - @cached_property - def users(self) -> UsersResourceWithStreamingResponse: - return UsersResourceWithStreamingResponse(self._organization.users) - - -class AsyncOrganizationResourceWithStreamingResponse: - def __init__(self, organization: AsyncOrganizationResource) -> None: - self._organization = organization - - self.get_costs = async_to_streamed_response_wrapper( - organization.get_costs, - ) - self.list_audit_logs = async_to_streamed_response_wrapper( - organization.list_audit_logs, - ) - - @cached_property - def admin_api_keys(self) -> AsyncAdminAPIKeysResourceWithStreamingResponse: - return AsyncAdminAPIKeysResourceWithStreamingResponse(self._organization.admin_api_keys) - - @cached_property - def invites(self) -> AsyncInvitesResourceWithStreamingResponse: - return AsyncInvitesResourceWithStreamingResponse(self._organization.invites) - - @cached_property - def projects(self) -> AsyncProjectsResourceWithStreamingResponse: - return AsyncProjectsResourceWithStreamingResponse(self._organization.projects) - - @cached_property - def usage(self) -> AsyncUsageResourceWithStreamingResponse: - return AsyncUsageResourceWithStreamingResponse(self._organization.usage) - - @cached_property - def users(self) -> AsyncUsersResourceWithStreamingResponse: - return AsyncUsersResourceWithStreamingResponse(self._organization.users) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py b/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py deleted file mode 100644 index f3ceec3b..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
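The organization resource removed above also exposed cost and audit-log queries. A sketch of the async variant follows, assuming an `AsyncDigitaloceanGenaiSDK` client class (the name is not confirmed by this patch); the timestamp and event type are illustrative.

import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # class name assumed


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    # Daily cost buckets from an inclusive Unix-seconds start time, grouped by project.
    costs = await client.organization.get_costs(
        start_time=1_700_000_000,
        bucket_width="1d",
        group_by=["project_id"],
        limit=7,
    )
    # Audit events filtered to a single event type.
    logs = await client.organization.list_audit_logs(
        event_types=["project.created"],
        limit=20,
    )
    print(costs, logs)


asyncio.run(main())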
- -from .users import ( - UsersResource, - AsyncUsersResource, - UsersResourceWithRawResponse, - AsyncUsersResourceWithRawResponse, - UsersResourceWithStreamingResponse, - AsyncUsersResourceWithStreamingResponse, -) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .projects import ( - ProjectsResource, - AsyncProjectsResource, - ProjectsResourceWithRawResponse, - AsyncProjectsResourceWithRawResponse, - ProjectsResourceWithStreamingResponse, - AsyncProjectsResourceWithStreamingResponse, -) -from .rate_limits import ( - RateLimitsResource, - AsyncRateLimitsResource, - RateLimitsResourceWithRawResponse, - AsyncRateLimitsResourceWithRawResponse, - RateLimitsResourceWithStreamingResponse, - AsyncRateLimitsResourceWithStreamingResponse, -) -from .service_accounts import ( - ServiceAccountsResource, - AsyncServiceAccountsResource, - ServiceAccountsResourceWithRawResponse, - AsyncServiceAccountsResourceWithRawResponse, - ServiceAccountsResourceWithStreamingResponse, - AsyncServiceAccountsResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", - "RateLimitsResource", - "AsyncRateLimitsResource", - "RateLimitsResourceWithRawResponse", - "AsyncRateLimitsResourceWithRawResponse", - "RateLimitsResourceWithStreamingResponse", - "AsyncRateLimitsResourceWithStreamingResponse", - "ServiceAccountsResource", - "AsyncServiceAccountsResource", - "ServiceAccountsResourceWithRawResponse", - "AsyncServiceAccountsResourceWithRawResponse", - "ServiceAccountsResourceWithStreamingResponse", - "AsyncServiceAccountsResourceWithStreamingResponse", - "UsersResource", - "AsyncUsersResource", - "UsersResourceWithRawResponse", - "AsyncUsersResourceWithRawResponse", - "UsersResourceWithStreamingResponse", - "AsyncUsersResourceWithStreamingResponse", - "ProjectsResource", - "AsyncProjectsResource", - "ProjectsResourceWithRawResponse", - "AsyncProjectsResourceWithRawResponse", - "ProjectsResourceWithStreamingResponse", - "AsyncProjectsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py b/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py deleted file mode 100644 index 4369beeb..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/api_keys.py +++ /dev/null @@ -1,375 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
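Every resource in this patch carries the same `.with_raw_response` / `.with_streaming_response` affordances described in its docstrings. A sketch of both follows, reusing the assumed sync `client`; the `.parse()` call and header access follow the usual Stainless-generated pattern rather than anything confirmed by this diff.

# Raw access: the HTTP response object is returned instead of the parsed model.
raw = client.organization.projects.with_raw_response.list()
print(raw.headers.get("x-request-id"))
projects = raw.parse()  # parse into the typed ProjectListResponse on demand

# Streaming access: the body is not read eagerly; parse inside the context manager.
with client.organization.projects.with_streaming_response.list() as streamed:
    projects = streamed.parse()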
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.organization.projects import api_key_list_params -from ....types.organization.projects.api_key import APIKey -from ....types.organization.projects.api_key_list_response import APIKeyListResponse -from ....types.organization.projects.api_key_delete_response import APIKeyDeleteResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def retrieve( - self, - key_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKey: - """ - Retrieves an API key in the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return self._get( - f"/organization/projects/{project_id}/api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKey, - ) - - def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - Returns a list of API keys in the project. - - Args: - after: A cursor for use in pagination. 
`after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._get( - f"/organization/projects/{project_id}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - def delete( - self, - key_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - Deletes an API key from the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return self._delete( - f"/organization/projects/{project_id}/api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def retrieve( - self, - key_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKey: - """ - Retrieves an API key in the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return await self._get( - f"/organization/projects/{project_id}/api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKey, - ) - - async def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - Returns a list of API keys in the project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._get( - f"/organization/projects/{project_id}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - async def delete( - self, - key_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - Deletes an API key from the project. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not key_id: - raise ValueError(f"Expected a non-empty value for `key_id` but received {key_id!r}") - return await self._delete( - f"/organization/projects/{project_id}/api_keys/{key_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.retrieve = to_raw_response_wrapper( - api_keys.retrieve, - ) - self.list = to_raw_response_wrapper( - api_keys.list, - ) - self.delete = to_raw_response_wrapper( - api_keys.delete, - ) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.retrieve = async_to_raw_response_wrapper( - api_keys.retrieve, - ) - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - api_keys.delete, - ) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.retrieve = to_streamed_response_wrapper( - api_keys.retrieve, - ) - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = to_streamed_response_wrapper( - api_keys.delete, - ) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.retrieve = async_to_streamed_response_wrapper( - api_keys.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - api_keys.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py b/src/digitalocean_genai_sdk/resources/organization/projects/projects.py deleted file mode 100644 index 942c8773..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/projects.py +++ /dev/null @@ -1,670 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
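Project API keys, unlike organization admin keys, are addressed by both a project ID and a key ID, as the signatures above show. A sketch with illustrative IDs and the same assumed client:

# List, fetch, and delete keys scoped to a single project (IDs illustrative).
keys = client.organization.projects.api_keys.list("proj_abc123", limit=20)
key = client.organization.projects.api_keys.retrieve("key_xyz789", project_id="proj_abc123")
client.organization.projects.api_keys.delete("key_xyz789", project_id="proj_abc123")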
- -from __future__ import annotations - -import httpx - -from .users import ( - UsersResource, - AsyncUsersResource, - UsersResourceWithRawResponse, - AsyncUsersResourceWithRawResponse, - UsersResourceWithStreamingResponse, - AsyncUsersResourceWithStreamingResponse, -) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from .rate_limits import ( - RateLimitsResource, - AsyncRateLimitsResource, - RateLimitsResourceWithRawResponse, - AsyncRateLimitsResourceWithRawResponse, - RateLimitsResourceWithStreamingResponse, - AsyncRateLimitsResourceWithStreamingResponse, -) -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from .service_accounts import ( - ServiceAccountsResource, - AsyncServiceAccountsResource, - ServiceAccountsResourceWithRawResponse, - AsyncServiceAccountsResourceWithRawResponse, - ServiceAccountsResourceWithStreamingResponse, - AsyncServiceAccountsResourceWithStreamingResponse, -) -from ....types.organization import project_list_params, project_create_params, project_update_params -from ....types.organization.project import Project -from ....types.organization.project_list_response import ProjectListResponse - -__all__ = ["ProjectsResource", "AsyncProjectsResource"] - - -class ProjectsResource(SyncAPIResource): - @cached_property - def api_keys(self) -> APIKeysResource: - return APIKeysResource(self._client) - - @cached_property - def rate_limits(self) -> RateLimitsResource: - return RateLimitsResource(self._client) - - @cached_property - def service_accounts(self) -> ServiceAccountsResource: - return ServiceAccountsResource(self._client) - - @cached_property - def users(self) -> UsersResource: - return UsersResource(self._client) - - @cached_property - def with_raw_response(self) -> ProjectsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ProjectsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ProjectsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ProjectsResourceWithStreamingResponse(self) - - def create( - self, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """Create a new project in the organization. 
- - Projects can be created and archived, - but cannot be deleted. - - Args: - name: The friendly name of the project, this name appears in reports. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/organization/projects", - body=maybe_transform({"name": name}, project_create_params.ProjectCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - def retrieve( - self, - project_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """ - Retrieves a project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._get( - f"/organization/projects/{project_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - def update( - self, - project_id: str, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """ - Modifies a project in the organization. - - Args: - name: The updated name of the project, this name appears in reports. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._post( - f"/organization/projects/{project_id}", - body=maybe_transform({"name": name}, project_update_params.ProjectUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - include_archived: bool | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectListResponse: - """Returns a list of projects. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - include_archived: If `true` returns all projects including those that have been `archived`. - Archived projects are not included by default. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/projects", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "include_archived": include_archived, - "limit": limit, - }, - project_list_params.ProjectListParams, - ), - ), - cast_to=ProjectListResponse, - ) - - def archive( - self, - project_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """Archives a project in the organization. - - Archived projects cannot be used or - updated. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._post( - f"/organization/projects/{project_id}/archive", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - -class AsyncProjectsResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - return AsyncAPIKeysResource(self._client) - - @cached_property - def rate_limits(self) -> AsyncRateLimitsResource: - return AsyncRateLimitsResource(self._client) - - @cached_property - def service_accounts(self) -> AsyncServiceAccountsResource: - return AsyncServiceAccountsResource(self._client) - - @cached_property - def users(self) -> AsyncUsersResource: - return AsyncUsersResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
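
# --- Reviewer note (not part of the deleted file): a minimal usage sketch of the
# synchronous ProjectsResource removed above. The client class name
# `DigitaloceanGenaiSDK` and the `client.organization.projects` access path are
# assumptions inferred from the package layout, not confirmed by this patch.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class name

client = DigitaloceanGenaiSDK()  # by SDK convention, the API key is read from the environment

project = client.organization.projects.create(name="My Project")
project = client.organization.projects.update(project.id, name="Renamed Project")  # `id` field assumed on Project
page = client.organization.projects.list(limit=20, include_archived=True)
client.organization.projects.archive(project.id)  # archived projects cannot be used or updated
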
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncProjectsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncProjectsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncProjectsResourceWithStreamingResponse(self) - - async def create( - self, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """Create a new project in the organization. - - Projects can be created and archived, - but cannot be deleted. - - Args: - name: The friendly name of the project, this name appears in reports. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/organization/projects", - body=await async_maybe_transform({"name": name}, project_create_params.ProjectCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - async def retrieve( - self, - project_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """ - Retrieves a project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._get( - f"/organization/projects/{project_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - async def update( - self, - project_id: str, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """ - Modifies a project in the organization. - - Args: - name: The updated name of the project, this name appears in reports. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._post( - f"/organization/projects/{project_id}", - body=await async_maybe_transform({"name": name}, project_update_params.ProjectUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - include_archived: bool | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectListResponse: - """Returns a list of projects. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - include_archived: If `true` returns all projects including those that have been `archived`. - Archived projects are not included by default. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/projects", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "include_archived": include_archived, - "limit": limit, - }, - project_list_params.ProjectListParams, - ), - ), - cast_to=ProjectListResponse, - ) - - async def archive( - self, - project_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Project: - """Archives a project in the organization. - - Archived projects cannot be used or - updated. 
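
# --- Reviewer note: the async variants mirror the sync methods one-for-one; a
# sketch, assuming the async client is exported as `AsyncDigitaloceanGenaiSDK`
# (name not confirmed by this patch).
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # assumed name


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    page = await client.organization.projects.list(limit=10)
    print(page)


asyncio.run(main())
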
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._post( - f"/organization/projects/{project_id}/archive", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Project, - ) - - -class ProjectsResourceWithRawResponse: - def __init__(self, projects: ProjectsResource) -> None: - self._projects = projects - - self.create = to_raw_response_wrapper( - projects.create, - ) - self.retrieve = to_raw_response_wrapper( - projects.retrieve, - ) - self.update = to_raw_response_wrapper( - projects.update, - ) - self.list = to_raw_response_wrapper( - projects.list, - ) - self.archive = to_raw_response_wrapper( - projects.archive, - ) - - @cached_property - def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._projects.api_keys) - - @cached_property - def rate_limits(self) -> RateLimitsResourceWithRawResponse: - return RateLimitsResourceWithRawResponse(self._projects.rate_limits) - - @cached_property - def service_accounts(self) -> ServiceAccountsResourceWithRawResponse: - return ServiceAccountsResourceWithRawResponse(self._projects.service_accounts) - - @cached_property - def users(self) -> UsersResourceWithRawResponse: - return UsersResourceWithRawResponse(self._projects.users) - - -class AsyncProjectsResourceWithRawResponse: - def __init__(self, projects: AsyncProjectsResource) -> None: - self._projects = projects - - self.create = async_to_raw_response_wrapper( - projects.create, - ) - self.retrieve = async_to_raw_response_wrapper( - projects.retrieve, - ) - self.update = async_to_raw_response_wrapper( - projects.update, - ) - self.list = async_to_raw_response_wrapper( - projects.list, - ) - self.archive = async_to_raw_response_wrapper( - projects.archive, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._projects.api_keys) - - @cached_property - def rate_limits(self) -> AsyncRateLimitsResourceWithRawResponse: - return AsyncRateLimitsResourceWithRawResponse(self._projects.rate_limits) - - @cached_property - def service_accounts(self) -> AsyncServiceAccountsResourceWithRawResponse: - return AsyncServiceAccountsResourceWithRawResponse(self._projects.service_accounts) - - @cached_property - def users(self) -> AsyncUsersResourceWithRawResponse: - return AsyncUsersResourceWithRawResponse(self._projects.users) - - -class ProjectsResourceWithStreamingResponse: - def __init__(self, projects: ProjectsResource) -> None: - self._projects = projects - - self.create = to_streamed_response_wrapper( - projects.create, - ) - self.retrieve = to_streamed_response_wrapper( - projects.retrieve, - ) - self.update = to_streamed_response_wrapper( - projects.update, - ) - self.list = to_streamed_response_wrapper( - projects.list, - ) - self.archive = to_streamed_response_wrapper( - projects.archive, - ) - - @cached_property - def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._projects.api_keys) - - @cached_property - def rate_limits(self) -> RateLimitsResourceWithStreamingResponse: - return 
RateLimitsResourceWithStreamingResponse(self._projects.rate_limits) - - @cached_property - def service_accounts(self) -> ServiceAccountsResourceWithStreamingResponse: - return ServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts) - - @cached_property - def users(self) -> UsersResourceWithStreamingResponse: - return UsersResourceWithStreamingResponse(self._projects.users) - - -class AsyncProjectsResourceWithStreamingResponse: - def __init__(self, projects: AsyncProjectsResource) -> None: - self._projects = projects - - self.create = async_to_streamed_response_wrapper( - projects.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - projects.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - projects.update, - ) - self.list = async_to_streamed_response_wrapper( - projects.list, - ) - self.archive = async_to_streamed_response_wrapper( - projects.archive, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._projects.api_keys) - - @cached_property - def rate_limits(self) -> AsyncRateLimitsResourceWithStreamingResponse: - return AsyncRateLimitsResourceWithStreamingResponse(self._projects.rate_limits) - - @cached_property - def service_accounts(self) -> AsyncServiceAccountsResourceWithStreamingResponse: - return AsyncServiceAccountsResourceWithStreamingResponse(self._projects.service_accounts) - - @cached_property - def users(self) -> AsyncUsersResourceWithStreamingResponse: - return AsyncUsersResourceWithStreamingResponse(self._projects.users) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py b/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py deleted file mode 100644 index b15ef7ec..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/rate_limits.py +++ /dev/null @@ -1,360 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.organization.projects import rate_limit_list_params, rate_limit_update_params -from ....types.organization.projects.rate_limit import RateLimit -from ....types.organization.projects.rate_limit_list_response import RateLimitListResponse - -__all__ = ["RateLimitsResource", "AsyncRateLimitsResource"] - - -class RateLimitsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> RateLimitsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return RateLimitsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RateLimitsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return RateLimitsResourceWithStreamingResponse(self) - - def update( - self, - rate_limit_id: str, - *, - project_id: str, - batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN, - max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN, - max_images_per_1_minute: int | NotGiven = NOT_GIVEN, - max_requests_per_1_day: int | NotGiven = NOT_GIVEN, - max_requests_per_1_minute: int | NotGiven = NOT_GIVEN, - max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RateLimit: - """ - Updates a project rate limit. - - Args: - batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models. - - max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models. - - max_images_per_1_minute: The maximum images per minute. Only relevant for certain models. - - max_requests_per_1_day: The maximum requests per day. Only relevant for certain models. - - max_requests_per_1_minute: The maximum requests per minute. - - max_tokens_per_1_minute: The maximum tokens per minute. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not rate_limit_id: - raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}") - return self._post( - f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}", - body=maybe_transform( - { - "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens, - "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute, - "max_images_per_1_minute": max_images_per_1_minute, - "max_requests_per_1_day": max_requests_per_1_day, - "max_requests_per_1_minute": max_requests_per_1_minute, - "max_tokens_per_1_minute": max_tokens_per_1_minute, - }, - rate_limit_update_params.RateLimitUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RateLimit, - ) - - def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RateLimitListResponse: - """ - Returns the rate limits per model for a project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - beginning with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. The default is 100. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._get( - f"/organization/projects/{project_id}/rate_limits", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - }, - rate_limit_list_params.RateLimitListParams, - ), - ), - cast_to=RateLimitListResponse, - ) - - -class AsyncRateLimitsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncRateLimitsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRateLimitsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRateLimitsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncRateLimitsResourceWithStreamingResponse(self) - - async def update( - self, - rate_limit_id: str, - *, - project_id: str, - batch_1_day_max_input_tokens: int | NotGiven = NOT_GIVEN, - max_audio_megabytes_per_1_minute: int | NotGiven = NOT_GIVEN, - max_images_per_1_minute: int | NotGiven = NOT_GIVEN, - max_requests_per_1_day: int | NotGiven = NOT_GIVEN, - max_requests_per_1_minute: int | NotGiven = NOT_GIVEN, - max_tokens_per_1_minute: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RateLimit: - """ - Updates a project rate limit. - - Args: - batch_1_day_max_input_tokens: The maximum batch input tokens per day. Only relevant for certain models. - - max_audio_megabytes_per_1_minute: The maximum audio megabytes per minute. Only relevant for certain models. - - max_images_per_1_minute: The maximum images per minute. Only relevant for certain models. - - max_requests_per_1_day: The maximum requests per day. Only relevant for certain models. - - max_requests_per_1_minute: The maximum requests per minute. 
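
# --- Reviewer note: a sketch of the rate-limit methods removed above. Note the
# argument order in the deleted code: the rate limit ID is positional and
# `project_id` is keyword-only. IDs below are placeholders; client construction
# is as in the earlier sketches.
client.organization.projects.rate_limits.update(
    "rl_xxx",  # placeholder rate limit ID
    project_id="proj_xxx",  # placeholder project ID
    max_requests_per_1_minute=500,
    max_tokens_per_1_minute=100_000,
)
limits = client.organization.projects.rate_limits.list("proj_xxx", limit=100)
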
- - max_tokens_per_1_minute: The maximum tokens per minute. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not rate_limit_id: - raise ValueError(f"Expected a non-empty value for `rate_limit_id` but received {rate_limit_id!r}") - return await self._post( - f"/organization/projects/{project_id}/rate_limits/{rate_limit_id}", - body=await async_maybe_transform( - { - "batch_1_day_max_input_tokens": batch_1_day_max_input_tokens, - "max_audio_megabytes_per_1_minute": max_audio_megabytes_per_1_minute, - "max_images_per_1_minute": max_images_per_1_minute, - "max_requests_per_1_day": max_requests_per_1_day, - "max_requests_per_1_minute": max_requests_per_1_minute, - "max_tokens_per_1_minute": max_tokens_per_1_minute, - }, - rate_limit_update_params.RateLimitUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RateLimit, - ) - - async def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RateLimitListResponse: - """ - Returns the rate limits per model for a project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - beginning with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. The default is 100. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._get( - f"/organization/projects/{project_id}/rate_limits", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - }, - rate_limit_list_params.RateLimitListParams, - ), - ), - cast_to=RateLimitListResponse, - ) - - -class RateLimitsResourceWithRawResponse: - def __init__(self, rate_limits: RateLimitsResource) -> None: - self._rate_limits = rate_limits - - self.update = to_raw_response_wrapper( - rate_limits.update, - ) - self.list = to_raw_response_wrapper( - rate_limits.list, - ) - - -class AsyncRateLimitsResourceWithRawResponse: - def __init__(self, rate_limits: AsyncRateLimitsResource) -> None: - self._rate_limits = rate_limits - - self.update = async_to_raw_response_wrapper( - rate_limits.update, - ) - self.list = async_to_raw_response_wrapper( - rate_limits.list, - ) - - -class RateLimitsResourceWithStreamingResponse: - def __init__(self, rate_limits: RateLimitsResource) -> None: - self._rate_limits = rate_limits - - self.update = to_streamed_response_wrapper( - rate_limits.update, - ) - self.list = to_streamed_response_wrapper( - rate_limits.list, - ) - - -class AsyncRateLimitsResourceWithStreamingResponse: - def __init__(self, rate_limits: AsyncRateLimitsResource) -> None: - self._rate_limits = rate_limits - - self.update = async_to_streamed_response_wrapper( - rate_limits.update, - ) - self.list = async_to_streamed_response_wrapper( - rate_limits.list, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py b/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py deleted file mode 100644 index d40c841f..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/service_accounts.py +++ /dev/null @@ -1,466 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
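
# --- Reviewer note: every resource in this patch exposes the `.with_raw_response`
# prefix documented in the docstrings above; a sketch, assuming the usual
# Stainless raw-response surface (`headers`, `parse()`), which this patch does
# not itself show.
response = client.organization.projects.rate_limits.with_raw_response.list("proj_xxx")
print(response.headers)  # inspect HTTP headers before parsing
limits = response.parse()  # assumed to return the typed RateLimitListResponse
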
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.organization.projects import service_account_list_params, service_account_create_params -from ....types.organization.projects.service_account import ServiceAccount -from ....types.organization.projects.service_account_list_response import ServiceAccountListResponse -from ....types.organization.projects.service_account_create_response import ServiceAccountCreateResponse -from ....types.organization.projects.service_account_delete_response import ServiceAccountDeleteResponse - -__all__ = ["ServiceAccountsResource", "AsyncServiceAccountsResource"] - - -class ServiceAccountsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ServiceAccountsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ServiceAccountsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ServiceAccountsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ServiceAccountsResourceWithStreamingResponse(self) - - def create( - self, - project_id: str, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountCreateResponse: - """Creates a new service account in the project. - - This also returns an unredacted - API key for the service account. - - Args: - name: The name of the service account being created. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._post( - f"/organization/projects/{project_id}/service_accounts", - body=maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccountCreateResponse, - ) - - def retrieve( - self, - service_account_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccount: - """ - Retrieves a service account in the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not service_account_id: - raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") - return self._get( - f"/organization/projects/{project_id}/service_accounts/{service_account_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccount, - ) - - def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountListResponse: - """ - Returns a list of service accounts in the project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._get( - f"/organization/projects/{project_id}/service_accounts", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - service_account_list_params.ServiceAccountListParams, - ), - ), - cast_to=ServiceAccountListResponse, - ) - - def delete( - self, - service_account_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountDeleteResponse: - """ - Deletes a service account from the project. 
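
# --- Reviewer note: per the docstring above, service-account creation is the one
# call that returns an unredacted API key. A sketch; the exact field names on
# ServiceAccountCreateResponse are not shown in this patch.
created = client.organization.projects.service_accounts.create(
    "proj_xxx",  # placeholder project ID
    name="ci-runner",
)
# Store the returned key now -- it is not retrievable in unredacted form later.
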
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not service_account_id: - raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") - return self._delete( - f"/organization/projects/{project_id}/service_accounts/{service_account_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccountDeleteResponse, - ) - - -class AsyncServiceAccountsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncServiceAccountsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncServiceAccountsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncServiceAccountsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncServiceAccountsResourceWithStreamingResponse(self) - - async def create( - self, - project_id: str, - *, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountCreateResponse: - """Creates a new service account in the project. - - This also returns an unredacted - API key for the service account. - - Args: - name: The name of the service account being created. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._post( - f"/organization/projects/{project_id}/service_accounts", - body=await async_maybe_transform({"name": name}, service_account_create_params.ServiceAccountCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccountCreateResponse, - ) - - async def retrieve( - self, - service_account_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccount: - """ - Retrieves a service account in the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not service_account_id: - raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") - return await self._get( - f"/organization/projects/{project_id}/service_accounts/{service_account_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccount, - ) - - async def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountListResponse: - """ - Returns a list of service accounts in the project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._get( - f"/organization/projects/{project_id}/service_accounts", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - service_account_list_params.ServiceAccountListParams, - ), - ), - cast_to=ServiceAccountListResponse, - ) - - async def delete( - self, - service_account_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ServiceAccountDeleteResponse: - """ - Deletes a service account from the project. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not service_account_id: - raise ValueError(f"Expected a non-empty value for `service_account_id` but received {service_account_id!r}") - return await self._delete( - f"/organization/projects/{project_id}/service_accounts/{service_account_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ServiceAccountDeleteResponse, - ) - - -class ServiceAccountsResourceWithRawResponse: - def __init__(self, service_accounts: ServiceAccountsResource) -> None: - self._service_accounts = service_accounts - - self.create = to_raw_response_wrapper( - service_accounts.create, - ) - self.retrieve = to_raw_response_wrapper( - service_accounts.retrieve, - ) - self.list = to_raw_response_wrapper( - service_accounts.list, - ) - self.delete = to_raw_response_wrapper( - service_accounts.delete, - ) - - -class AsyncServiceAccountsResourceWithRawResponse: - def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None: - self._service_accounts = service_accounts - - self.create = async_to_raw_response_wrapper( - service_accounts.create, - ) - self.retrieve = async_to_raw_response_wrapper( - service_accounts.retrieve, - ) - self.list = async_to_raw_response_wrapper( - service_accounts.list, - ) - self.delete = async_to_raw_response_wrapper( - service_accounts.delete, - ) - - -class ServiceAccountsResourceWithStreamingResponse: - def __init__(self, service_accounts: ServiceAccountsResource) -> None: - self._service_accounts = service_accounts - - self.create = to_streamed_response_wrapper( - service_accounts.create, - ) - self.retrieve = to_streamed_response_wrapper( - service_accounts.retrieve, - ) - self.list = to_streamed_response_wrapper( - service_accounts.list, - ) - self.delete = to_streamed_response_wrapper( - service_accounts.delete, - ) - - -class AsyncServiceAccountsResourceWithStreamingResponse: - def __init__(self, service_accounts: AsyncServiceAccountsResource) -> None: - self._service_accounts = service_accounts - - self.create = async_to_streamed_response_wrapper( - service_accounts.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - service_accounts.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - service_accounts.list, - ) - self.delete = async_to_streamed_response_wrapper( - service_accounts.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/projects/users.py b/src/digitalocean_genai_sdk/resources/organization/projects/users.py deleted file mode 100644 index 6e40263a..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/projects/users.py +++ /dev/null @@ -1,577 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
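
# --- Reviewer note: `.with_streaming_response` defers reading the body; a
# sketch, assuming the usual Stainless streaming surface (a context manager with
# `iter_lines()`), which this patch does not itself show.
with client.organization.projects.service_accounts.with_streaming_response.list("proj_xxx") as response:
    for line in response.iter_lines():
        print(line)
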
- -from __future__ import annotations - -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.organization.projects import user_add_params, user_list_params, user_update_params -from ....types.organization.projects.project_user import ProjectUser -from ....types.organization.projects.user_list_response import UserListResponse -from ....types.organization.projects.user_delete_response import UserDeleteResponse - -__all__ = ["UsersResource", "AsyncUsersResource"] - - -class UsersResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> UsersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return UsersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> UsersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return UsersResourceWithStreamingResponse(self) - - def retrieve( - self, - user_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """ - Retrieves a user in the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._get( - f"/organization/projects/{project_id}/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - def update( - self, - user_id: str, - *, - project_id: str, - role: Literal["owner", "member"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """ - Modifies a user's role in the project. 
- - Args: - role: `owner` or `member` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._post( - f"/organization/projects/{project_id}/users/{user_id}", - body=maybe_transform({"role": role}, user_update_params.UserUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserListResponse: - """ - Returns a list of users in the project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._get( - f"/organization/projects/{project_id}/users", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - user_list_params.UserListParams, - ), - ), - cast_to=UserListResponse, - ) - - def delete( - self, - user_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserDeleteResponse: - """ - Deletes a user from the project. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._delete( - f"/organization/projects/{project_id}/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UserDeleteResponse, - ) - - def add( - self, - project_id: str, - *, - role: Literal["owner", "member"], - user_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """Adds a user to the project. - - Users must already be members of the organization to - be added to a project. - - Args: - role: `owner` or `member` - - user_id: The ID of the user. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return self._post( - f"/organization/projects/{project_id}/users", - body=maybe_transform( - { - "role": role, - "user_id": user_id, - }, - user_add_params.UserAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - -class AsyncUsersResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncUsersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncUsersResourceWithStreamingResponse(self) - - async def retrieve( - self, - user_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """ - Retrieves a user in the project. 
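
# --- Reviewer note: a sketch of the project-membership methods removed above.
# Per the docstrings, the user must already belong to the organization, and
# `role` is restricted to "owner" or "member". IDs are placeholders.
member = client.organization.projects.users.add(
    "proj_xxx",
    role="member",
    user_id="user_abc",
)
member = client.organization.projects.users.update("user_abc", project_id="proj_xxx", role="owner")
client.organization.projects.users.delete("user_abc", project_id="proj_xxx")
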
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._get( - f"/organization/projects/{project_id}/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - async def update( - self, - user_id: str, - *, - project_id: str, - role: Literal["owner", "member"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """ - Modifies a user's role in the project. - - Args: - role: `owner` or `member` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._post( - f"/organization/projects/{project_id}/users/{user_id}", - body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - async def list( - self, - project_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserListResponse: - """ - Returns a list of users in the project. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._get( - f"/organization/projects/{project_id}/users", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "limit": limit, - }, - user_list_params.UserListParams, - ), - ), - cast_to=UserListResponse, - ) - - async def delete( - self, - user_id: str, - *, - project_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserDeleteResponse: - """ - Deletes a user from the project. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._delete( - f"/organization/projects/{project_id}/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UserDeleteResponse, - ) - - async def add( - self, - project_id: str, - *, - role: Literal["owner", "member"], - user_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ProjectUser: - """Adds a user to the project. - - Users must already be members of the organization to - be added to a project. - - Args: - role: `owner` or `member` - - user_id: The ID of the user. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not project_id: - raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") - return await self._post( - f"/organization/projects/{project_id}/users", - body=await async_maybe_transform( - { - "role": role, - "user_id": user_id, - }, - user_add_params.UserAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ProjectUser, - ) - - -class UsersResourceWithRawResponse: - def __init__(self, users: UsersResource) -> None: - self._users = users - - self.retrieve = to_raw_response_wrapper( - users.retrieve, - ) - self.update = to_raw_response_wrapper( - users.update, - ) - self.list = to_raw_response_wrapper( - users.list, - ) - self.delete = to_raw_response_wrapper( - users.delete, - ) - self.add = to_raw_response_wrapper( - users.add, - ) - - -class AsyncUsersResourceWithRawResponse: - def __init__(self, users: AsyncUsersResource) -> None: - self._users = users - - self.retrieve = async_to_raw_response_wrapper( - users.retrieve, - ) - self.update = async_to_raw_response_wrapper( - users.update, - ) - self.list = async_to_raw_response_wrapper( - users.list, - ) - self.delete = async_to_raw_response_wrapper( - users.delete, - ) - self.add = async_to_raw_response_wrapper( - users.add, - ) - - -class UsersResourceWithStreamingResponse: - def __init__(self, users: UsersResource) -> None: - self._users = users - - self.retrieve = to_streamed_response_wrapper( - users.retrieve, - ) - self.update = to_streamed_response_wrapper( - users.update, - ) - self.list = to_streamed_response_wrapper( - users.list, - ) - self.delete = to_streamed_response_wrapper( - users.delete, - ) - self.add = to_streamed_response_wrapper( - users.add, - ) - - -class AsyncUsersResourceWithStreamingResponse: - def __init__(self, users: AsyncUsersResource) -> None: - self._users = users - - self.retrieve = async_to_streamed_response_wrapper( - users.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - users.update, - ) - self.list = async_to_streamed_response_wrapper( - users.list, - ) - self.delete = async_to_streamed_response_wrapper( - users.delete, - ) - self.add = async_to_streamed_response_wrapper( - users.add, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/usage.py b/src/digitalocean_genai_sdk/resources/organization/usage.py deleted file mode 100644 index 2eae3b13..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/usage.py +++ /dev/null @@ -1,1543 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
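The hunk above removes the full project-users surface: retrieve, update, list (cursor-paginated via `after`/`limit`, default 20), delete, and add. A minimal sketch of how that resource was typically driven, assuming the generated client class is `DigitaloceanGenaiSDK` exported from the package root and that the accessor path mirrors the module layout (both assumptions here), with placeholder IDs:

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # class name assumed

    client = DigitaloceanGenaiSDK()  # construction/credential details assumed

    project_id = "proj_123"  # placeholder
    user_id = "user_123"     # placeholder

    # Add an existing organization member to the project.
    client.organization.projects.users.add(project_id, role="member", user_id=user_id)

    # Promote the same user to project owner.
    client.organization.projects.users.update(user_id, project_id=project_id, role="owner")

    # Page through members with the `after` cursor; `limit` defaults to 20.
    page = client.organization.projects.users.list(project_id, limit=20)

    # Remove the user from the project (they remain in the organization).
    client.organization.projects.users.delete(user_id, project_id=project_id)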
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.organization import ( - usage_images_params, - usage_embeddings_params, - usage_completions_params, - usage_moderations_params, - usage_vector_stores_params, - usage_audio_speeches_params, - usage_audio_transcriptions_params, - usage_code_interpreter_sessions_params, -) -from ...types.usage_response import UsageResponse - -__all__ = ["UsageResource", "AsyncUsageResource"] - - -class UsageResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> UsageResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return UsageResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> UsageResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return UsageResourceWithStreamingResponse(self) - - def audio_speeches( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get audio speeches usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. 
Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/audio_speeches", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_audio_speeches_params.UsageAudioSpeechesParams, - ), - ), - cast_to=UsageResponse, - ) - - def audio_transcriptions( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get audio transcriptions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/audio_transcriptions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_audio_transcriptions_params.UsageAudioTranscriptionsParams, - ), - ), - cast_to=UsageResponse, - ) - - def code_interpreter_sessions( - self, - *, - start_time: int, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get code interpreter sessions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/code_interpreter_sessions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams, - ), - ), - cast_to=UsageResponse, - ) - - def completions( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - batch: bool | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get completions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By - default, return both. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of - them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/completions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "batch": batch, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_completions_params.UsageCompletionsParams, - ), - ), - cast_to=UsageResponse, - ) - - def embeddings( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get embeddings usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/embeddings", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_embeddings_params.UsageEmbeddingsParams, - ), - ), - cast_to=UsageResponse, - ) - - def images( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] - | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN, - sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get images usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any - combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - sizes: Return only usages for these image sizes. Possible values are `256x256`, - `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them. - - sources: Return only usages for these sources. Possible values are `image.generation`, - `image.edit`, `image.variation` or any combination of them. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/images", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "sizes": sizes, - "sources": sources, - "user_ids": user_ids, - }, - usage_images_params.UsageImagesParams, - ), - ), - cast_to=UsageResponse, - ) - - def moderations( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get moderations usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/moderations", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_moderations_params.UsageModerationsParams, - ), - ), - cast_to=UsageResponse, - ) - - def vector_stores( - self, - *, - start_time: int, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get vector stores usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/usage/vector_stores", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - usage_vector_stores_params.UsageVectorStoresParams, - ), - ), - cast_to=UsageResponse, - ) - - -class AsyncUsageResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncUsageResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncUsageResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncUsageResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncUsageResourceWithStreamingResponse(self) - - async def audio_speeches( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get audio speeches usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/audio_speeches", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_audio_speeches_params.UsageAudioSpeechesParams, - ), - ), - cast_to=UsageResponse, - ) - - async def audio_transcriptions( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get audio transcriptions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/audio_transcriptions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_audio_transcriptions_params.UsageAudioTranscriptionsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def code_interpreter_sessions( - self, - *, - start_time: int, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get code interpreter sessions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/code_interpreter_sessions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - usage_code_interpreter_sessions_params.UsageCodeInterpreterSessionsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def completions( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - batch: bool | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get completions usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - batch: If `true`, return batch jobs only. If `false`, return non-batch jobs only. By - default, return both. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of - them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/completions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "batch": batch, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_completions_params.UsageCompletionsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def embeddings( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get embeddings usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/embeddings", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_embeddings_params.UsageEmbeddingsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def images( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] - | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] | NotGiven = NOT_GIVEN, - sources: List[Literal["image.generation", "image.edit", "image.variation"]] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get images usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any - combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - sizes: Return only usages for these image sizes. Possible values are `256x256`, - `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them. - - sources: Return only usages for these sources. Possible values are `image.generation`, - `image.edit`, `image.variation` or any combination of them. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/images", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "sizes": sizes, - "sources": sources, - "user_ids": user_ids, - }, - usage_images_params.UsageImagesParams, - ), - ), - cast_to=UsageResponse, - ) - - async def moderations( - self, - *, - start_time: int, - api_key_ids: List[str] | NotGiven = NOT_GIVEN, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - models: List[str] | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - user_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get moderations usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - api_key_ids: Return only usage for these API keys. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`, `user_id`, `api_key_id`, `model` or any combination of them. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - models: Return only usage for these models. - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. - - user_ids: Return only usage for these users. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/moderations", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "api_key_ids": api_key_ids, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "models": models, - "page": page, - "project_ids": project_ids, - "user_ids": user_ids, - }, - usage_moderations_params.UsageModerationsParams, - ), - ), - cast_to=UsageResponse, - ) - - async def vector_stores( - self, - *, - start_time: int, - bucket_width: Literal["1m", "1h", "1d"] | NotGiven = NOT_GIVEN, - end_time: int | NotGiven = NOT_GIVEN, - group_by: List[Literal["project_id"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - project_ids: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UsageResponse: - """ - Get vector stores usage details for the organization. - - Args: - start_time: Start time (Unix seconds) of the query time range, inclusive. - - bucket_width: Width of each time bucket in response. Currently `1m`, `1h` and `1d` are - supported, default to `1d`. - - end_time: End time (Unix seconds) of the query time range, exclusive. - - group_by: Group the usage data by the specified fields. Support fields include - `project_id`. - - limit: Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - - page: A cursor for use in pagination. Corresponding to the `next_page` field from the - previous response. - - project_ids: Return only usage for these projects. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/usage/vector_stores", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "start_time": start_time, - "bucket_width": bucket_width, - "end_time": end_time, - "group_by": group_by, - "limit": limit, - "page": page, - "project_ids": project_ids, - }, - usage_vector_stores_params.UsageVectorStoresParams, - ), - ), - cast_to=UsageResponse, - ) - - -class UsageResourceWithRawResponse: - def __init__(self, usage: UsageResource) -> None: - self._usage = usage - - self.audio_speeches = to_raw_response_wrapper( - usage.audio_speeches, - ) - self.audio_transcriptions = to_raw_response_wrapper( - usage.audio_transcriptions, - ) - self.code_interpreter_sessions = to_raw_response_wrapper( - usage.code_interpreter_sessions, - ) - self.completions = to_raw_response_wrapper( - usage.completions, - ) - self.embeddings = to_raw_response_wrapper( - usage.embeddings, - ) - self.images = to_raw_response_wrapper( - usage.images, - ) - self.moderations = to_raw_response_wrapper( - usage.moderations, - ) - self.vector_stores = to_raw_response_wrapper( - usage.vector_stores, - ) - - -class AsyncUsageResourceWithRawResponse: - def __init__(self, usage: AsyncUsageResource) -> None: - self._usage = usage - - self.audio_speeches = async_to_raw_response_wrapper( - usage.audio_speeches, - ) - self.audio_transcriptions = async_to_raw_response_wrapper( - usage.audio_transcriptions, - ) - self.code_interpreter_sessions = async_to_raw_response_wrapper( - usage.code_interpreter_sessions, - ) - self.completions = async_to_raw_response_wrapper( - usage.completions, - ) - self.embeddings = async_to_raw_response_wrapper( - usage.embeddings, - ) - self.images = async_to_raw_response_wrapper( - usage.images, - ) - self.moderations = async_to_raw_response_wrapper( - usage.moderations, - ) - self.vector_stores = async_to_raw_response_wrapper( - usage.vector_stores, - ) - - -class UsageResourceWithStreamingResponse: - def __init__(self, usage: UsageResource) -> None: - self._usage = usage - - self.audio_speeches = to_streamed_response_wrapper( - usage.audio_speeches, - ) - self.audio_transcriptions = to_streamed_response_wrapper( - usage.audio_transcriptions, - ) - self.code_interpreter_sessions = to_streamed_response_wrapper( - usage.code_interpreter_sessions, - ) - self.completions = to_streamed_response_wrapper( - usage.completions, - ) - self.embeddings = to_streamed_response_wrapper( - usage.embeddings, - ) - self.images = to_streamed_response_wrapper( - usage.images, - ) - self.moderations = to_streamed_response_wrapper( - usage.moderations, - ) - self.vector_stores = to_streamed_response_wrapper( - usage.vector_stores, - ) - - -class AsyncUsageResourceWithStreamingResponse: - def __init__(self, usage: AsyncUsageResource) -> None: - self._usage = usage - - self.audio_speeches = async_to_streamed_response_wrapper( - usage.audio_speeches, - ) - self.audio_transcriptions = async_to_streamed_response_wrapper( - usage.audio_transcriptions, - ) - self.code_interpreter_sessions = async_to_streamed_response_wrapper( - usage.code_interpreter_sessions, - ) - self.completions = async_to_streamed_response_wrapper( - 
usage.completions, - ) - self.embeddings = async_to_streamed_response_wrapper( - usage.embeddings, - ) - self.images = async_to_streamed_response_wrapper( - usage.images, - ) - self.moderations = async_to_streamed_response_wrapper( - usage.moderations, - ) - self.vector_stores = async_to_streamed_response_wrapper( - usage.vector_stores, - ) diff --git a/src/digitalocean_genai_sdk/resources/organization/users.py b/src/digitalocean_genai_sdk/resources/organization/users.py deleted file mode 100644 index 44fb2f5a..00000000 --- a/src/digitalocean_genai_sdk/resources/organization/users.py +++ /dev/null @@ -1,454 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.organization import user_list_params, user_update_params -from ...types.organization.organization_user import OrganizationUser -from ...types.organization.user_list_response import UserListResponse -from ...types.organization.user_delete_response import UserDeleteResponse - -__all__ = ["UsersResource", "AsyncUsersResource"] - - -class UsersResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> UsersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return UsersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> UsersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return UsersResourceWithStreamingResponse(self) - - def retrieve( - self, - user_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationUser: - """ - Retrieves a user by their identifier. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._get( - f"/organization/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OrganizationUser, - ) - - def update( - self, - user_id: str, - *, - role: Literal["owner", "reader"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationUser: - """ - Modifies a user's role in the organization. - - Args: - role: `owner` or `reader` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._post( - f"/organization/users/{user_id}", - body=maybe_transform({"role": role}, user_update_params.UserUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OrganizationUser, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - emails: List[str] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserListResponse: - """ - Lists all of the users in the organization. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - emails: Filter by the email address of users. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/organization/users", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "emails": emails, - "limit": limit, - }, - user_list_params.UserListParams, - ), - ), - cast_to=UserListResponse, - ) - - def delete( - self, - user_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserDeleteResponse: - """ - Deletes a user from the organization. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return self._delete( - f"/organization/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UserDeleteResponse, - ) - - -class AsyncUsersResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncUsersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncUsersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncUsersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncUsersResourceWithStreamingResponse(self) - - async def retrieve( - self, - user_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationUser: - """ - Retrieves a user by their identifier. 
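# The same retrieval against the async resource below, as a hedged sketch;
# `AsyncDigitaloceanGenaiSDK` is an assumed name for the async client export.
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # assumed export name


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    user = await client.organization.users.retrieve("user_abc123")  # placeholder ID
    print(user)


asyncio.run(main())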
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._get( - f"/organization/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OrganizationUser, - ) - - async def update( - self, - user_id: str, - *, - role: Literal["owner", "reader"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> OrganizationUser: - """ - Modifies a user's role in the organization. - - Args: - role: `owner` or `reader` - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._post( - f"/organization/users/{user_id}", - body=await async_maybe_transform({"role": role}, user_update_params.UserUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=OrganizationUser, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - emails: List[str] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserListResponse: - """ - Lists all of the users in the organization. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - emails: Filter by the email address of users. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/organization/users", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "emails": emails, - "limit": limit, - }, - user_list_params.UserListParams, - ), - ), - cast_to=UserListResponse, - ) - - async def delete( - self, - user_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UserDeleteResponse: - """ - Deletes a user from the organization. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not user_id: - raise ValueError(f"Expected a non-empty value for `user_id` but received {user_id!r}") - return await self._delete( - f"/organization/users/{user_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UserDeleteResponse, - ) - - -class UsersResourceWithRawResponse: - def __init__(self, users: UsersResource) -> None: - self._users = users - - self.retrieve = to_raw_response_wrapper( - users.retrieve, - ) - self.update = to_raw_response_wrapper( - users.update, - ) - self.list = to_raw_response_wrapper( - users.list, - ) - self.delete = to_raw_response_wrapper( - users.delete, - ) - - -class AsyncUsersResourceWithRawResponse: - def __init__(self, users: AsyncUsersResource) -> None: - self._users = users - - self.retrieve = async_to_raw_response_wrapper( - users.retrieve, - ) - self.update = async_to_raw_response_wrapper( - users.update, - ) - self.list = async_to_raw_response_wrapper( - users.list, - ) - self.delete = async_to_raw_response_wrapper( - users.delete, - ) - - -class UsersResourceWithStreamingResponse: - def __init__(self, users: UsersResource) -> None: - self._users = users - - self.retrieve = to_streamed_response_wrapper( - users.retrieve, - ) - self.update = to_streamed_response_wrapper( - users.update, - ) - self.list = to_streamed_response_wrapper( - users.list, - ) - self.delete = to_streamed_response_wrapper( - users.delete, - ) - - -class AsyncUsersResourceWithStreamingResponse: - def __init__(self, users: AsyncUsersResource) -> None: - self._users = users - - self.retrieve = async_to_streamed_response_wrapper( - users.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - users.update, - ) - self.list = async_to_streamed_response_wrapper( - users.list, - ) - self.delete = async_to_streamed_response_wrapper( - users.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/realtime.py b/src/digitalocean_genai_sdk/resources/realtime.py deleted file mode 100644 index bfa61beb..00000000 --- a/src/digitalocean_genai_sdk/resources/realtime.py +++ /dev/null @@ -1,574 +0,0 
@@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal - -import httpx - -from ..types import realtime_create_session_params, realtime_create_transcription_session_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.voice_ids_shared_param import VoiceIDsSharedParam -from ..types.realtime_create_session_response import RealtimeCreateSessionResponse -from ..types.realtime_create_transcription_session_response import RealtimeCreateTranscriptionSessionResponse - -__all__ = ["RealtimeResource", "AsyncRealtimeResource"] - - -class RealtimeResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> RealtimeResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return RealtimeResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RealtimeResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return RealtimeResourceWithStreamingResponse(self) - - def create_session( - self, - *, - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN, - input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - | NotGiven = NOT_GIVEN, - output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - tool_choice: str | NotGiven = NOT_GIVEN, - tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN, - turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RealtimeCreateSessionResponse: - """ - Create an ephemeral API token for use in client-side applications with the - Realtime API. Can be configured with the same session parameters as the - `session.update` client event. - - It responds with a session object, plus a `client_secret` key which contains a - usable ephemeral API token that can be used to authenticate browser clients for - the Realtime API. - - Args: - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For - `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel - (mono), and little-endian byte order. - - input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn - off. Noise reduction filters audio added to the input audio buffer before it is - sent to VAD and the model. Filtering the audio can improve VAD and turn - detection accuracy (reducing false positives) and model performance by improving - perception of the input audio. - - input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to - `null` to turn off once on. Input audio transcription is not native to the - model, since the model consumes audio directly. Transcription runs - asynchronously through - [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) - and should be treated as guidance of input audio content rather than precisely - what the model heard. The client can optionally set the language and prompt for - transcription, these offer additional guidance to the transcription service. - - instructions: The default system instructions (i.e. system message) prepended to model calls. - This field allows the client to guide the model on desired responses. The model - can be instructed on response content and format, (e.g. "be extremely succinct", - "act friendly", "here are examples of good responses") and on audio behavior - (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The - instructions are not guaranteed to be followed by the model, but they provide - guidance to the model on the desired behavior. - - Note that the server sets default instructions which will be used if this field - is not set and are visible in the `session.created` event at the start of the - session. - - max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of - tool calls. Provide an integer between 1 and 4096 to limit output tokens, or - `inf` for the maximum available tokens for a given model. Defaults to `inf`. - - modalities: The set of modalities the model can respond with. To disable audio, set this to - ["text"]. - - model: The Realtime model used for this session. - - output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. - For `pcm16`, output audio is sampled at a rate of 24kHz. - - temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a - temperature of 0.8 is highly recommended for best performance. - - tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify - a function. - - tools: Tools (functions) available to the model. 
- - turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be - set to `null` to turn off, in which case the client must manually trigger model - response. Server VAD means that the model will detect the start and end of - speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjunction with VAD) to - semantically estimate whether the user has finished speaking, then dynamically - sets a timeout based on this probability. For example, if user audio trails off - with "uhhm", the model will score a low probability of turn end and wait longer - for the user to continue speaking. This can be useful for more natural - conversations, but may have a higher latency. - - voice: The voice the model uses to respond. Voice cannot be changed during the session - once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/realtime/sessions", - body=maybe_transform( - { - "input_audio_format": input_audio_format, - "input_audio_noise_reduction": input_audio_noise_reduction, - "input_audio_transcription": input_audio_transcription, - "instructions": instructions, - "max_response_output_tokens": max_response_output_tokens, - "modalities": modalities, - "model": model, - "output_audio_format": output_audio_format, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "turn_detection": turn_detection, - "voice": voice, - }, - realtime_create_session_params.RealtimeCreateSessionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RealtimeCreateSessionResponse, - ) - - def create_transcription_session( - self, - *, - include: List[str] | NotGiven = NOT_GIVEN, - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction - | NotGiven = NOT_GIVEN, - input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription - | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, - turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RealtimeCreateTranscriptionSessionResponse: - """ - Create an ephemeral API token for use in client-side applications with the - Realtime API specifically for realtime transcriptions. Can be configured with - the same session parameters as the `transcription_session.update` client event.
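# A hedged sketch of the create_session method defined above: it mints an
# ephemeral client token for the Realtime API. The client class name and the
# `client.realtime` accessor are assumptions based on this file's module path.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()
session = client.realtime.create_session(
    model="gpt-4o-realtime-preview",
    modalities=["text", "audio"],
    voice="alloy",
)
# Per the docstring above, the response includes a `client_secret` that browser
# clients can use to authenticate; the exact field shape is an assumption.
print(session.client_secret)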
- - It responds with a session object, plus a `client_secret` key which contains a - usable ephemeral API token that can be used to authenticate browser clients for - the Realtime API. - - Args: - include: - The set of items to include in the transcription. Current available items are: - - - `item.input_audio_transcription.logprobs` - - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For - `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel - (mono), and little-endian byte order. - - input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn - off. Noise reduction filters audio added to the input audio buffer before it is - sent to VAD and the model. Filtering the audio can improve VAD and turn - detection accuracy (reducing false positives) and model performance by improving - perception of the input audio. - - input_audio_transcription: Configuration for input audio transcription. The client can optionally set the - language and prompt for transcription, these offer additional guidance to the - transcription service. - - modalities: The set of modalities the model can respond with. To disable audio, set this to - ["text"]. - - turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be - set to `null` to turn off, in which case the client must manually trigger model - response. Server VAD means that the model will detect the start and end of - speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjunction with VAD) to - semantically estimate whether the user has finished speaking, then dynamically - sets a timeout based on this probability. For example, if user audio trails off - with "uhhm", the model will score a low probability of turn end and wait longer - for the user to continue speaking. This can be useful for more natural - conversations, but may have a higher latency. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/realtime/transcription_sessions", - body=maybe_transform( - { - "include": include, - "input_audio_format": input_audio_format, - "input_audio_noise_reduction": input_audio_noise_reduction, - "input_audio_transcription": input_audio_transcription, - "modalities": modalities, - "turn_detection": turn_detection, - }, - realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RealtimeCreateTranscriptionSessionResponse, - ) - - -class AsyncRealtimeResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncRealtimeResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content.
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRealtimeResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRealtimeResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncRealtimeResourceWithStreamingResponse(self) - - async def create_session( - self, - *, - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_noise_reduction: realtime_create_session_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN, - input_audio_transcription: realtime_create_session_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - | NotGiven = NOT_GIVEN, - output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - tool_choice: str | NotGiven = NOT_GIVEN, - tools: Iterable[realtime_create_session_params.Tool] | NotGiven = NOT_GIVEN, - turn_detection: realtime_create_session_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: VoiceIDsSharedParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RealtimeCreateSessionResponse: - """ - Create an ephemeral API token for use in client-side applications with the - Realtime API. Can be configured with the same session parameters as the - `session.update` client event. - - It responds with a session object, plus a `client_secret` key which contains a - usable ephemeral API token that can be used to authenticate browser clients for - the Realtime API. - - Args: - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For - `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel - (mono), and little-endian byte order. - - input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn - off. Noise reduction filters audio added to the input audio buffer before it is - sent to VAD and the model. Filtering the audio can improve VAD and turn - detection accuracy (reducing false positives) and model performance by improving - perception of the input audio. - - input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to - `null` to turn off once on. Input audio transcription is not native to the - model, since the model consumes audio directly. 
Transcription runs - asynchronously through - [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) - and should be treated as guidance of input audio content rather than precisely - what the model heard. The client can optionally set the language and prompt for - transcription, these offer additional guidance to the transcription service. - - instructions: The default system instructions (i.e. system message) prepended to model calls. - This field allows the client to guide the model on desired responses. The model - can be instructed on response content and format, (e.g. "be extremely succinct", - "act friendly", "here are examples of good responses") and on audio behavior - (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The - instructions are not guaranteed to be followed by the model, but they provide - guidance to the model on the desired behavior. - - Note that the server sets default instructions which will be used if this field - is not set and are visible in the `session.created` event at the start of the - session. - - max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of - tool calls. Provide an integer between 1 and 4096 to limit output tokens, or - `inf` for the maximum available tokens for a given model. Defaults to `inf`. - - modalities: The set of modalities the model can respond with. To disable audio, set this to - ["text"]. - - model: The Realtime model used for this session. - - output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. - For `pcm16`, output audio is sampled at a rate of 24kHz. - - temperature: Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a - temperature of 0.8 is highly recommended for best performance. - - tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify - a function. - - tools: Tools (functions) available to the model. - - turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be - set to `null` to turn off, in which case the client must manually trigger model - response. Server VAD means that the model will detect the start and end of - speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjunction with VAD) to - semantically estimate whether the user has finished speaking, then dynamically - sets a timeout based on this probability. For example, if user audio trails off - with "uhhm", the model will score a low probability of turn end and wait longer - for the user to continue speaking. This can be useful for more natural - conversations, but may have a higher latency. - - voice: The voice the model uses to respond. Voice cannot be changed during the session - once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`.
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/realtime/sessions", - body=await async_maybe_transform( - { - "input_audio_format": input_audio_format, - "input_audio_noise_reduction": input_audio_noise_reduction, - "input_audio_transcription": input_audio_transcription, - "instructions": instructions, - "max_response_output_tokens": max_response_output_tokens, - "modalities": modalities, - "model": model, - "output_audio_format": output_audio_format, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "turn_detection": turn_detection, - "voice": voice, - }, - realtime_create_session_params.RealtimeCreateSessionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RealtimeCreateSessionResponse, - ) - - async def create_transcription_session( - self, - *, - include: List[str] | NotGiven = NOT_GIVEN, - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_noise_reduction: realtime_create_transcription_session_params.InputAudioNoiseReduction - | NotGiven = NOT_GIVEN, - input_audio_transcription: realtime_create_transcription_session_params.InputAudioTranscription - | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, - turn_detection: realtime_create_transcription_session_params.TurnDetection | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RealtimeCreateTranscriptionSessionResponse: - """ - Create an ephemeral API token for use in client-side applications with the - Realtime API specifically for realtime transcriptions. Can be configured with - the same session parameters as the `transcription_session.update` client event. - - It responds with a session object, plus a `client_secret` key which contains a - usable ephemeral API token that can be used to authenticate browser clients for - the Realtime API. - - Args: - include: - The set of items to include in the transcription. Current available items are: - - - `item.input_audio_transcription.logprobs` - - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For - `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel - (mono), and little-endian byte order. - - input_audio_noise_reduction: Configuration for input audio noise reduction. This can be set to `null` to turn - off. Noise reduction filters audio added to the input audio buffer before it is - sent to VAD and the model. Filtering the audio can improve VAD and turn - detection accuracy (reducing false positives) and model performance by improving - perception of the input audio. - - input_audio_transcription: Configuration for input audio transcription. The client can optionally set the - language and prompt for transcription, these offer additional guidance to the - transcription service. 
- - modalities: The set of modalities the model can respond with. To disable audio, set this to - ["text"]. - - turn_detection: Configuration for turn detection, either Server VAD or Semantic VAD. This can be - set to `null` to turn off, in which case the client must manually trigger model - response. Server VAD means that the model will detect the start and end of - speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjunction with VAD) to - semantically estimate whether the user has finished speaking, then dynamically - sets a timeout based on this probability. For example, if user audio trails off - with "uhhm", the model will score a low probability of turn end and wait longer - for the user to continue speaking. This can be useful for more natural - conversations, but may have a higher latency. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/realtime/transcription_sessions", - body=await async_maybe_transform( - { - "include": include, - "input_audio_format": input_audio_format, - "input_audio_noise_reduction": input_audio_noise_reduction, - "input_audio_transcription": input_audio_transcription, - "modalities": modalities, - "turn_detection": turn_detection, - }, - realtime_create_transcription_session_params.RealtimeCreateTranscriptionSessionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RealtimeCreateTranscriptionSessionResponse, - ) - - -class RealtimeResourceWithRawResponse: - def __init__(self, realtime: RealtimeResource) -> None: - self._realtime = realtime - - self.create_session = to_raw_response_wrapper( - realtime.create_session, - ) - self.create_transcription_session = to_raw_response_wrapper( - realtime.create_transcription_session, - ) - - -class AsyncRealtimeResourceWithRawResponse: - def __init__(self, realtime: AsyncRealtimeResource) -> None: - self._realtime = realtime - - self.create_session = async_to_raw_response_wrapper( - realtime.create_session, - ) - self.create_transcription_session = async_to_raw_response_wrapper( - realtime.create_transcription_session, - ) - - -class RealtimeResourceWithStreamingResponse: - def __init__(self, realtime: RealtimeResource) -> None: - self._realtime = realtime - - self.create_session = to_streamed_response_wrapper( - realtime.create_session, - ) - self.create_transcription_session = to_streamed_response_wrapper( - realtime.create_transcription_session, - ) - - -class AsyncRealtimeResourceWithStreamingResponse: - def __init__(self, realtime: AsyncRealtimeResource) -> None: - self._realtime = realtime - - self.create_session = async_to_streamed_response_wrapper( - realtime.create_session, - ) - self.create_transcription_session = async_to_streamed_response_wrapper( - realtime.create_transcription_session, - ) diff --git a/src/digitalocean_genai_sdk/resources/responses.py b/src/digitalocean_genai_sdk/resources/responses.py deleted file mode 100644 index 1890002e..00000000 --- a/src/digitalocean_genai_sdk/resources/responses.py +++ /dev/null @@ -1,902 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
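# A hedged sketch of the create_transcription_session method defined in the
# Realtime resource above. The `include` value is taken verbatim from its
# docstring; the client class name and accessor path are assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()
session = client.realtime.create_transcription_session(
    include=["item.input_audio_transcription.logprobs"],
    input_audio_format="pcm16",
)
print(session.client_secret)  # ephemeral token for browser-side transcription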
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from ..types import response_create_params, response_retrieve_params, response_list_input_items_params -from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.response import Response -from ..types.includable import Includable -from ..types.response_list_input_items_response import ResponseListInputItemsResponse - -__all__ = ["ResponsesResource", "AsyncResponsesResource"] - - -class ResponsesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ResponsesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ResponsesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ResponsesResourceWithStreamingResponse(self) - - def create( - self, - *, - input: Union[str, Iterable[response_create_params.InputInputItemList]], - model: Union[ - Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - "o1-pro", - "o1-pro-2025-03-19", - "computer-use-preview", - "computer-use-preview-2025-03-11", - ], - str, - ], - include: Optional[List[Includable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: 
Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Response: - """Creates a model response. - - Provide [text](/docs/guides/text) or - [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or - [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own - [custom code](/docs/guides/function-calling) or use built-in - [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search) to use your own data as input for - the model's response. - - Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Image inputs](/docs/guides/images) - - [File inputs](/docs/guides/pdf-files) - - [Conversation state](/docs/guides/conversation-state) - - [Function calling](/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the [model guide](/docs/models) to browse and compare - available models. - - include: Specify additional output data to include in the model response. Currently - supported values are: - - - `file_search_call.results`: Include the search results of - - the file search tool call. - - - `message.input_image.image_url`: Include image urls from the input message. - - `computer_call_output.output.image_url`: Include image urls from the computer - call output. - - instructions: Inserts a system (or developer) message as the first item in the model's - context. - - When used along with `previous_response_id`, the instructions from a previous - response will not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. - - max_output_tokens: An upper bound for the number of tokens that can be generated for a response, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - parallel_tool_calls: Whether to allow the model to run tool calls in parallel. - - previous_response_id: The unique ID of the previous response to the model. Use this to create - multi-turn conversations. Learn more about - [conversation state](/docs/guides/conversation-state).
- - reasoning: **o-series models only** - - Configuration options for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). - - store: Whether to store the generated model response for later retrieval via API. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the [Streaming section below](/docs/api-reference/responses-streaming) for - more information. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. We generally recommend altering this or `top_p` but - not both. - - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Structured Outputs](/docs/guides/structured-outputs) - - tool_choice: How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. - - tools: An array of tools the model may call while generating a response. You can - specify which tool to use by setting the `tool_choice` parameter. - - The two categories of tools you can provide the model are: - - - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - capabilities, like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search). Learn more about - [built-in tools](/docs/guides/tools). - - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about - [function calling](/docs/guides/function-calling). - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - truncation: The truncation strategy to use for the model response. - - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size - for a model, the request will fail with a 400 error. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/responses", - body=maybe_transform( - { - "input": input, - "model": model, - "include": include, - "instructions": instructions, - "max_output_tokens": max_output_tokens, - "metadata": metadata, - "parallel_tool_calls": parallel_tool_calls, - "previous_response_id": previous_response_id, - "reasoning": reasoning, - "store": store, - "stream": stream, - "temperature": temperature, - "text": text, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation": truncation, - "user": user, - }, - response_create_params.ResponseCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Response, - ) - - def retrieve( - self, - response_id: str, - *, - include: List[Includable] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Response: - """ - Retrieves a model response with the given ID. - - Args: - include: Specify additional output data to include in the response. Currently supported - values are: - - - `file_search_call.results`: Include the search results of - - the file search tool call. - - - `message.input_image.image_url`: Include image urls from the input message. - - `computer_call_output.output.image_url`: Include image urls from the computer - call output. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - return self._get( - f"/responses/{response_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"include": include}, response_retrieve_params.ResponseRetrieveParams), - ), - cast_to=Response, - ) - - def delete( - self, - response_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Deletes a model response with the given ID. 
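# A hedged sketch tying together the Responses methods in this file: create a
# response, then delete it by ID, matching the `create` and `delete`
# signatures above. The client class name is an assumption.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed export name

client = DigitaloceanGenaiSDK()
resp = client.responses.create(
    model="gpt-4o",
    input="Say hello in one short sentence.",
)
client.responses.delete(resp.id)  # returns None, per the annotation above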
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/responses/{response_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def list_input_items( - self, - response_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResponseListInputItemsResponse: - """ - Returns a list of input items for a given response. - - Args: - after: An item ID to list items after, used in pagination. - - before: An item ID to list items before, used in pagination. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: The order to return the input items in. Default is `asc`. - - - `asc`: Return the input items in ascending order. - - `desc`: Return the input items in descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - return self._get( - f"/responses/{response_id}/input_items", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - response_list_input_items_params.ResponseListInputItemsParams, - ), - ), - cast_to=ResponseListInputItemsResponse, - ) - - -class AsyncResponsesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncResponsesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncResponsesResourceWithStreamingResponse(self) - - async def create( - self, - *, - input: Union[str, Iterable[response_create_params.InputInputItemList]], - model: Union[ - Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - "o1-pro", - "o1-pro-2025-03-19", - "computer-use-preview", - "computer-use-preview-2025-03-11", - ], - str, - ], - include: Optional[List[Includable]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, - previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, - reasoning: Optional[response_create_params.Reasoning] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, - tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[response_create_params.Tool] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Response: - """Creates a model response. - - Provide [text](/docs/guides/text) or - [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or - [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own - [custom code](/docs/guides/function-calling) or use built-in - [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search) to use your own data as input for - the model's response. - - Args: - input: Text, image, or file inputs to the model, used to generate a response. 
-
- Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Image inputs](/docs/guides/images)
- - [File inputs](/docs/guides/pdf-files)
- - [Conversation state](/docs/guides/conversation-state)
- - [Function calling](/docs/guides/function-calling)
-
- model: Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- wide range of models with different capabilities, performance characteristics,
- and price points. Refer to the [model guide](/docs/models) to browse and compare
- available models.
-
- include: Specify additional output data to include in the model response. Currently
- supported values are:
-
- - `file_search_call.results`: Include the search results of the file search tool call.
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
-
- instructions: Inserts a system (or developer) message as the first item in the model's
- context.
-
- When using along with `previous_response_id`, the instructions from a previous
- response will not be carried over to the next response. This makes it simple
- to swap out system (or developer) messages in new responses.
-
- max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
- including visible output tokens and [reasoning tokens](/docs/guides/reasoning).
-
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
-
- parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
-
- previous_response_id: The unique ID of the previous response to the model. Use this to create
- multi-turn conversations. Learn more about
- [conversation state](/docs/guides/conversation-state).
-
- reasoning: **o-series models only**
-
- Configuration options for
- [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
- store: Whether to store the generated model response for later retrieval via API.
-
- stream: If set to true, the model response data will be streamed to the client as it is
- generated using
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
- See the [Streaming section below](/docs/api-reference/responses-streaming) for
- more information.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic. We generally recommend altering this or `top_p` but
- not both.
-
- text: Configuration options for a text response from the model. Can be plain text or
- structured JSON data. Learn more:
-
- - [Text inputs and outputs](/docs/guides/text)
- - [Structured Outputs](/docs/guides/structured-outputs)
-
- tool_choice: How the model should select which tool (or tools) to use when generating a
- response. See the `tools` parameter to see how to specify which tools the model
- can call.
-
- tools: An array of tools the model may call while generating a response. You can
- specify which tool to use by setting the `tool_choice` parameter.
-
- The two categories of tools you can provide the model are:
-
- - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
- capabilities, like [web search](/docs/guides/tools-web-search) or
- [file search](/docs/guides/tools-file-search). Learn more about
- [built-in tools](/docs/guides/tools).
- - **Function calls (custom tools)**: Functions that are defined by you, enabling
- the model to call your own code. Learn more about
- [function calling](/docs/guides/function-calling).
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- truncation: The truncation strategy to use for the model response.
-
- - `auto`: If the context of this response and previous ones exceeds the model's
- context window size, the model will truncate the response to fit the context
- window by dropping input items in the middle of the conversation.
- - `disabled` (default): If a model response will exceed the context window size
- for a model, the request will fail with a 400 error.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/responses",
- body=await async_maybe_transform(
- {
- "input": input,
- "model": model,
- "include": include,
- "instructions": instructions,
- "max_output_tokens": max_output_tokens,
- "metadata": metadata,
- "parallel_tool_calls": parallel_tool_calls,
- "previous_response_id": previous_response_id,
- "reasoning": reasoning,
- "store": store,
- "stream": stream,
- "temperature": temperature,
- "text": text,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_p": top_p,
- "truncation": truncation,
- "user": user,
- },
- response_create_params.ResponseCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Response,
- )
-
- async def retrieve(
- self,
- response_id: str,
- *,
- include: List[Includable] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Response:
- """
- Retrieves a model response with the given ID.
-
- Args:
- include: Specify additional output data to include in the response. Currently supported
- values are:
-
- - `file_search_call.results`: Include the search results of the file search tool call.
- - `message.input_image.image_url`: Include image urls from the input message.
- - `computer_call_output.output.image_url`: Include image urls from the computer
- call output.
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - return await self._get( - f"/responses/{response_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"include": include}, response_retrieve_params.ResponseRetrieveParams - ), - ), - cast_to=Response, - ) - - async def delete( - self, - response_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Deletes a model response with the given ID. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/responses/{response_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def list_input_items( - self, - response_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResponseListInputItemsResponse: - """ - Returns a list of input items for a given response. - - Args: - after: An item ID to list items after, used in pagination. - - before: An item ID to list items before, used in pagination. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: The order to return the input items in. Default is `asc`. - - - `asc`: Return the input items in ascending order. - - `desc`: Return the input items in descending order. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not response_id: - raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}") - return await self._get( - f"/responses/{response_id}/input_items", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - response_list_input_items_params.ResponseListInputItemsParams, - ), - ), - cast_to=ResponseListInputItemsResponse, - ) - - -class ResponsesResourceWithRawResponse: - def __init__(self, responses: ResponsesResource) -> None: - self._responses = responses - - self.create = to_raw_response_wrapper( - responses.create, - ) - self.retrieve = to_raw_response_wrapper( - responses.retrieve, - ) - self.delete = to_raw_response_wrapper( - responses.delete, - ) - self.list_input_items = to_raw_response_wrapper( - responses.list_input_items, - ) - - -class AsyncResponsesResourceWithRawResponse: - def __init__(self, responses: AsyncResponsesResource) -> None: - self._responses = responses - - self.create = async_to_raw_response_wrapper( - responses.create, - ) - self.retrieve = async_to_raw_response_wrapper( - responses.retrieve, - ) - self.delete = async_to_raw_response_wrapper( - responses.delete, - ) - self.list_input_items = async_to_raw_response_wrapper( - responses.list_input_items, - ) - - -class ResponsesResourceWithStreamingResponse: - def __init__(self, responses: ResponsesResource) -> None: - self._responses = responses - - self.create = to_streamed_response_wrapper( - responses.create, - ) - self.retrieve = to_streamed_response_wrapper( - responses.retrieve, - ) - self.delete = to_streamed_response_wrapper( - responses.delete, - ) - self.list_input_items = to_streamed_response_wrapper( - responses.list_input_items, - ) - - -class AsyncResponsesResourceWithStreamingResponse: - def __init__(self, responses: AsyncResponsesResource) -> None: - self._responses = responses - - self.create = async_to_streamed_response_wrapper( - responses.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - responses.retrieve, - ) - self.delete = async_to_streamed_response_wrapper( - responses.delete, - ) - self.list_input_items = async_to_streamed_response_wrapper( - responses.list_input_items, - ) diff --git a/src/digitalocean_genai_sdk/resources/threads/__init__.py b/src/digitalocean_genai_sdk/resources/threads/__init__.py deleted file mode 100644 index 736b9bd6..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
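Taken together, the methods deleted above form the whole Responses surface of this SDK. A minimal usage sketch of that surface, assuming the package root exports a `DigitaloceanGenaiSDK` client class (the client name and the `id` field on `Response` are assumptions; the method names are the ones defined above):

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # client class name is an assumption

    client = DigitaloceanGenaiSDK()

    # Create a response, re-fetch it, page its input items, then delete it.
    response = client.responses.create(model="gpt-4o", input="Write a haiku about the sea")
    response = client.responses.retrieve(response.id)
    items = client.responses.list_input_items(response.id, limit=20, order="asc")
    client.responses.delete(response.id)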
- -from .runs import ( - RunsResource, - AsyncRunsResource, - RunsResourceWithRawResponse, - AsyncRunsResourceWithRawResponse, - RunsResourceWithStreamingResponse, - AsyncRunsResourceWithStreamingResponse, -) -from .threads import ( - ThreadsResource, - AsyncThreadsResource, - ThreadsResourceWithRawResponse, - AsyncThreadsResourceWithRawResponse, - ThreadsResourceWithStreamingResponse, - AsyncThreadsResourceWithStreamingResponse, -) -from .messages import ( - MessagesResource, - AsyncMessagesResource, - MessagesResourceWithRawResponse, - AsyncMessagesResourceWithRawResponse, - MessagesResourceWithStreamingResponse, - AsyncMessagesResourceWithStreamingResponse, -) - -__all__ = [ - "RunsResource", - "AsyncRunsResource", - "RunsResourceWithRawResponse", - "AsyncRunsResourceWithRawResponse", - "RunsResourceWithStreamingResponse", - "AsyncRunsResourceWithStreamingResponse", - "MessagesResource", - "AsyncMessagesResource", - "MessagesResourceWithRawResponse", - "AsyncMessagesResourceWithRawResponse", - "MessagesResourceWithStreamingResponse", - "AsyncMessagesResourceWithStreamingResponse", - "ThreadsResource", - "AsyncThreadsResource", - "ThreadsResourceWithRawResponse", - "AsyncThreadsResourceWithRawResponse", - "ThreadsResourceWithStreamingResponse", - "AsyncThreadsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/threads/messages.py b/src/digitalocean_genai_sdk/resources/threads/messages.py deleted file mode 100644 index c4f75672..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/messages.py +++ /dev/null @@ -1,654 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.threads import message_list_params, message_create_params, message_update_params -from ...types.threads.message_object import MessageObject -from ...types.threads.message_list_response import MessageListResponse -from ...types.threads.message_delete_response import MessageDeleteResponse - -__all__ = ["MessagesResource", "AsyncMessagesResource"] - - -class MessagesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> MessagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return MessagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> MessagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return MessagesResourceWithStreamingResponse(self) - - def create( - self, - thread_id: str, - *, - content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]], - role: Literal["user", "assistant"], - attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Create a message. - - Args: - content: The text contents of the message. - - role: - The role of the entity that is creating the message. Allowed values include: - - - `user`: Indicates the message is sent by an actual user and should be used in - most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this - value to insert messages from the assistant into the conversation. - - attachments: A list of files attached to the message, and the tools they should be added to. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._post( - f"/threads/{thread_id}/messages", - body=maybe_transform( - { - "content": content, - "role": role, - "attachments": attachments, - "metadata": metadata, - }, - message_create_params.MessageCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - def retrieve( - self, - message_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Retrieve a message. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return self._get( - f"/threads/{thread_id}/messages/{message_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - def update( - self, - message_id: str, - *, - thread_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Modifies a message. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return self._post( - f"/threads/{thread_id}/messages/{message_id}", - body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - def list( - self, - thread_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - run_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageListResponse: - """ - Returns a list of messages for a given thread. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. 
`before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - run_id: Filter messages by the run ID that generated them. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._get( - f"/threads/{thread_id}/messages", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - "run_id": run_id, - }, - message_list_params.MessageListParams, - ), - ), - cast_to=MessageListResponse, - ) - - def delete( - self, - message_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageDeleteResponse: - """ - Deletes a message. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return self._delete( - f"/threads/{thread_id}/messages/{message_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageDeleteResponse, - ) - - -class AsyncMessagesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncMessagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncMessagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncMessagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
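With the synchronous MessagesResource fully shown above, its CRUD surface chains as in the sketch below, using the `client` from the earlier sketch; the thread ID and the `id` field on `MessageObject` are placeholders/assumptions:

    msg = client.threads.messages.create(
        "thread_abc123",  # placeholder thread ID
        content="What is nucleus sampling?",
        role="user",
    )
    client.threads.messages.update(msg.id, thread_id="thread_abc123", metadata={"source": "faq"})
    page = client.threads.messages.list("thread_abc123", limit=20, order="desc")
    client.threads.messages.delete(msg.id, thread_id="thread_abc123")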
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncMessagesResourceWithStreamingResponse(self) - - async def create( - self, - thread_id: str, - *, - content: Union[str, Iterable[message_create_params.ContentArrayOfContentPart]], - role: Literal["user", "assistant"], - attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Create a message. - - Args: - content: The text contents of the message. - - role: - The role of the entity that is creating the message. Allowed values include: - - - `user`: Indicates the message is sent by an actual user and should be used in - most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this - value to insert messages from the assistant into the conversation. - - attachments: A list of files attached to the message, and the tools they should be added to. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._post( - f"/threads/{thread_id}/messages", - body=await async_maybe_transform( - { - "content": content, - "role": role, - "attachments": attachments, - "metadata": metadata, - }, - message_create_params.MessageCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - async def retrieve( - self, - message_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Retrieve a message. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return await self._get( - f"/threads/{thread_id}/messages/{message_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - async def update( - self, - message_id: str, - *, - thread_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageObject: - """ - Modifies a message. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return await self._post( - f"/threads/{thread_id}/messages/{message_id}", - body=await async_maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageObject, - ) - - async def list( - self, - thread_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - run_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageListResponse: - """ - Returns a list of messages for a given thread. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. 
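The `after` cursor described here composes into a simple pagination loop; a sketch run inside an async function against an async client (the `data`, `has_more`, and `last_id` fields on `MessageListResponse` are assumptions):

    page = await client.threads.messages.list("thread_abc123", limit=100)
    while True:
        for message in page.data:  # `data`, `has_more`, `last_id` are assumed fields
            print(message.id)
        if not page.has_more:
            break
        page = await client.threads.messages.list(
            "thread_abc123", after=page.last_id, limit=100
        )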
- - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - run_id: Filter messages by the run ID that generated them. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._get( - f"/threads/{thread_id}/messages", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - "run_id": run_id, - }, - message_list_params.MessageListParams, - ), - ), - cast_to=MessageListResponse, - ) - - async def delete( - self, - message_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> MessageDeleteResponse: - """ - Deletes a message. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not message_id: - raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") - return await self._delete( - f"/threads/{thread_id}/messages/{message_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=MessageDeleteResponse, - ) - - -class MessagesResourceWithRawResponse: - def __init__(self, messages: MessagesResource) -> None: - self._messages = messages - - self.create = to_raw_response_wrapper( - messages.create, - ) - self.retrieve = to_raw_response_wrapper( - messages.retrieve, - ) - self.update = to_raw_response_wrapper( - messages.update, - ) - self.list = to_raw_response_wrapper( - messages.list, - ) - self.delete = to_raw_response_wrapper( - messages.delete, - ) - - -class AsyncMessagesResourceWithRawResponse: - def __init__(self, messages: AsyncMessagesResource) -> None: - self._messages = messages - - self.create = async_to_raw_response_wrapper( - messages.create, - ) - self.retrieve = async_to_raw_response_wrapper( - messages.retrieve, - ) - self.update = async_to_raw_response_wrapper( - messages.update, - ) - self.list = async_to_raw_response_wrapper( - messages.list, - ) - self.delete = async_to_raw_response_wrapper( - messages.delete, - ) - - -class MessagesResourceWithStreamingResponse: - def __init__(self, messages: MessagesResource) -> None: - self._messages = messages - - self.create = to_streamed_response_wrapper( - messages.create, - ) - self.retrieve = to_streamed_response_wrapper( - messages.retrieve, - ) - self.update = to_streamed_response_wrapper( - messages.update, - ) - self.list = to_streamed_response_wrapper( - messages.list, - ) - self.delete = to_streamed_response_wrapper( - messages.delete, - ) - - -class AsyncMessagesResourceWithStreamingResponse: - def __init__(self, messages: AsyncMessagesResource) -> None: - self._messages = messages - - self.create = async_to_streamed_response_wrapper( - messages.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - messages.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - messages.update, - ) - self.list = async_to_streamed_response_wrapper( - messages.list, - ) - self.delete = async_to_streamed_response_wrapper( - messages.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py b/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py deleted file mode 100644 index 70942400..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/runs/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
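The wrapper classes above are what back `.with_raw_response` and `.with_streaming_response`; a sketch of reaching them on the sync client (the `headers` attribute and `parse()` helper on the raw response object are assumptions consistent with the README links in the docstrings):

    raw = client.threads.messages.with_raw_response.retrieve(
        "msg_abc123",  # placeholder IDs
        thread_id="thread_abc123",
    )
    print(raw.headers.get("x-request-id"))  # assumed header access
    message = raw.parse()  # assumed helper yielding the MessageObject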
- -from .runs import ( - RunsResource, - AsyncRunsResource, - RunsResourceWithRawResponse, - AsyncRunsResourceWithRawResponse, - RunsResourceWithStreamingResponse, - AsyncRunsResourceWithStreamingResponse, -) -from .steps import ( - StepsResource, - AsyncStepsResource, - StepsResourceWithRawResponse, - AsyncStepsResourceWithRawResponse, - StepsResourceWithStreamingResponse, - AsyncStepsResourceWithStreamingResponse, -) - -__all__ = [ - "StepsResource", - "AsyncStepsResource", - "StepsResourceWithRawResponse", - "AsyncStepsResourceWithRawResponse", - "StepsResourceWithStreamingResponse", - "AsyncStepsResourceWithStreamingResponse", - "RunsResource", - "AsyncRunsResource", - "RunsResourceWithRawResponse", - "AsyncRunsResourceWithRawResponse", - "RunsResourceWithStreamingResponse", - "AsyncRunsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py b/src/digitalocean_genai_sdk/resources/threads/runs/runs.py deleted file mode 100644 index 4e1a9dc8..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/runs/runs.py +++ /dev/null @@ -1,1427 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal - -import httpx - -from .steps import ( - StepsResource, - AsyncStepsResource, - StepsResourceWithRawResponse, - AsyncStepsResourceWithRawResponse, - StepsResourceWithStreamingResponse, - AsyncStepsResourceWithStreamingResponse, -) -from ....types import ReasoningEffort -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.threads import ( - run_list_params, - run_create_params, - run_update_params, - run_create_run_params, - run_submit_tool_outputs_params, -) -from ....types.reasoning_effort import ReasoningEffort -from ....types.threads.run_object import RunObject -from ....types.threads.run_list_response import RunListResponse -from ....types.assistant_supported_models import AssistantSupportedModels -from ....types.create_thread_request_param import CreateThreadRequestParam -from ....types.threads.truncation_object_param import TruncationObjectParam -from ....types.threads.create_message_request_param import CreateMessageRequestParam -from ....types.assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam -from ....types.threads.assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam - -__all__ = ["RunsResource", "AsyncRunsResource"] - - -class RunsResource(SyncAPIResource): - @cached_property - def steps(self) -> StepsResource: - return StepsResource(self._client) - - @cached_property - def with_raw_response(self) -> RunsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return RunsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RunsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return RunsResourceWithStreamingResponse(self) - - def create( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4.5-preview", - "gpt-4.5-preview-2025-02-27", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Create a thread and run it in one request. - - Args: - assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - - instructions: Override the default system message of the assistant. This is useful for - modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. 
See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. If a value is provided here, it will override the model associated with the - assistant. If not, the model associated with the assistant will be used. - - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - thread: Options to create a new thread. If no thread is provided when running a request, - an empty thread will be created. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. 
So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both.
-
- truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
- control the initial context window of the run.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/threads/runs",
- body=maybe_transform(
- {
- "assistant_id": assistant_id,
- "instructions": instructions,
- "max_completion_tokens": max_completion_tokens,
- "max_prompt_tokens": max_prompt_tokens,
- "metadata": metadata,
- "model": model,
- "parallel_tool_calls": parallel_tool_calls,
- "response_format": response_format,
- "stream": stream,
- "temperature": temperature,
- "thread": thread,
- "tool_choice": tool_choice,
- "tool_resources": tool_resources,
- "tools": tools,
- "top_p": top_p,
- "truncation_strategy": truncation_strategy,
- },
- run_create_params.RunCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def retrieve(
- self,
- run_id: str,
- *,
- thread_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Retrieves a run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not thread_id:
- raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
- if not run_id:
- raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
- return self._get(
- f"/threads/{thread_id}/runs/{run_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RunObject,
- )
-
- def update(
- self,
- run_id: str,
- *,
- thread_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RunObject:
- """
- Modifies a run.
-
- Args:
- metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format, and
- querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
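A sketch of the one-shot thread-and-run call defined above, followed by re-fetching the run it returns (the assistant ID is a placeholder; the `id` and `thread_id` fields on `RunObject` are assumptions):

    run = client.threads.runs.create(
        assistant_id="asst_abc123",  # placeholder assistant ID
        thread={"messages": [{"role": "user", "content": "Summarize our conversation"}]},
        max_completion_tokens=512,
    )
    run = client.threads.runs.retrieve(run.id, thread_id=run.thread_id)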
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return self._post( - f"/threads/{thread_id}/runs/{run_id}", - body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - def list( - self, - thread_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunListResponse: - """ - Returns a list of runs belonging to a thread. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._get( - f"/threads/{thread_id}/runs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - run_list_params.RunListParams, - ), - ), - cast_to=RunListResponse, - ) - - def cancel( - self, - run_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Cancels a run that is `in_progress`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return self._post( - f"/threads/{thread_id}/runs/{run_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - def create_run( - self, - thread_id: str, - *, - assistant_id: str, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[run_create_run_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Create a run. - - Args: - assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - - additional_instructions: Appends additional instructions at the end of the instructions for the run. This - is useful for modifying the behavior on a per-run basis without overriding other - instructions. - - additional_messages: Adds additional messages to the thread before creating the run. - - instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of - the assistant. 
This is useful for modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. If a value is provided here, it will override the model associated with the - assistant. If not, the model associated with the assistant will be used. - - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. 
`auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the initial context window of the run. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._post( - f"/threads/{thread_id}/runs", - body=maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "parallel_tool_calls": parallel_tool_calls, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - }, - run_create_run_params.RunCreateRunParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams), - ), - cast_to=RunObject, - ) - - def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[bool] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - tool calls once they're all completed. All outputs must be submitted in a single - request. - - Args: - tool_outputs: A list of tools for which the outputs are being submitted. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message.
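-
- A minimal usage sketch, assuming `client` is a configured instance of this
- SDK's client class, the run is waiting in `requires_action`, and the
- `tool_call_id`/`output` keys follow the OpenAI-compatible schema (all IDs
- here are placeholders):
-
-     run = client.threads.runs.submit_tool_outputs(
-         "run_abc123",
-         thread_id="thread_abc123",
-         tool_outputs=[{"tool_call_id": "call_abc123", "output": "70F"}],
-     )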
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return self._post( - f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", - body=maybe_transform( - { - "tool_outputs": tool_outputs, - "stream": stream, - }, - run_submit_tool_outputs_params.RunSubmitToolOutputsParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - -class AsyncRunsResource(AsyncAPIResource): - @cached_property - def steps(self) -> AsyncStepsResource: - return AsyncStepsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncRunsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRunsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncRunsResourceWithStreamingResponse(self) - - async def create( - self, - *, - assistant_id: str, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4.5-preview", - "gpt-4.5-preview-2025-02-27", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - thread: CreateThreadRequestParam | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tool_resources: Optional[run_create_params.ToolResources] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Create a thread and run it in one request. - - Args: - assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - - instructions: Override the default system message of the assistant. This is useful for - modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. If a value is provided here, it will override the model associated with the - assistant. If not, the model associated with the assistant will be used. - - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - thread: Options to create a new thread. If no thread is provided when running a request, - an empty thread will be created. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tool_resources: A set of resources that are used by the assistant's tools. The resources are - specific to the type of tool. For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the initial context window of the run. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/threads/runs", - body=await async_maybe_transform( - { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "parallel_tool_calls": parallel_tool_calls, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "thread": thread, - "tool_choice": tool_choice, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - }, - run_create_params.RunCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - async def retrieve( - self, - run_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Retrieves a run.
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return await self._get( - f"/threads/{thread_id}/runs/{run_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - async def update( - self, - run_id: str, - *, - thread_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Modifies a run. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return await self._post( - f"/threads/{thread_id}/runs/{run_id}", - body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - async def list( - self, - thread_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunListResponse: - """ - Returns a list of runs belonging to a thread. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._get( - f"/threads/{thread_id}/runs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - run_list_params.RunListParams, - ), - ), - cast_to=RunListResponse, - ) - - async def cancel( - self, - run_id: str, - *, - thread_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Cancels a run that is `in_progress`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return await self._post( - f"/threads/{thread_id}/runs/{run_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - async def create_run( - self, - thread_id: str, - *, - assistant_id: str, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, - additional_messages: Optional[Iterable[CreateMessageRequestParam]] | NotGiven = NOT_GIVEN, - instructions: Optional[str] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - model: Union[str, AssistantSupportedModels, None] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - response_format: Optional[AssistantsAPIResponseFormatOptionParam] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] | NotGiven = NOT_GIVEN, - tools: Optional[Iterable[run_create_run_params.Tool]] | 
NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - Create a run. - - Args: - assistant_id: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - - additional_instructions: Appends additional instructions at the end of the instructions for the run. This - is useful for modifying the behavior on a per-run basis without overriding other - instructions. - - additional_messages: Adds additional messages to the thread before creating the run. - - instructions: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of - the assistant. This is useful for modifying the behavior on a per-run basis. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - model: The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. If a value is provided here, it will override the model associated with the - assistant. If not, the model associated with the assistant will be used. - - parallel_tool_calls: Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - - response_format: Specifies the format that the model must output. 
Compatible with - [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - tools: Override the tools the assistant can use for this run. This is useful for - modifying the behavior on a per-run basis. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - - truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the initial context window of the run.
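-
- A minimal usage sketch, assuming `client` is a configured async instance of
- this SDK's client class (the thread and assistant IDs are placeholders):
-
-     run = await client.threads.runs.create_run(
-         "thread_abc123",
-         assistant_id="asst_abc123",
-         instructions="Answer as concisely as possible.",
-     )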
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._post( - f"/threads/{thread_id}/runs", - body=await async_maybe_transform( - { - "assistant_id": assistant_id, - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "parallel_tool_calls": parallel_tool_calls, - "reasoning_effort": reasoning_effort, - "response_format": response_format, - "stream": stream, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - }, - run_create_run_params.RunCreateRunParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"include": include}, run_create_run_params.RunCreateRunParams), - ), - cast_to=RunObject, - ) - - async def submit_tool_outputs( - self, - run_id: str, - *, - thread_id: str, - tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], - stream: Optional[bool] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunObject: - """ - When a run has the `status: "requires_action"` and `required_action.type` is - `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - tool calls once they're all completed. All outputs must be submitted in a single - request. - - Args: - tool_outputs: A list of tools for which the outputs are being submitted. - - stream: If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return await self._post( - f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", - body=await async_maybe_transform( - { - "tool_outputs": tool_outputs, - "stream": stream, - }, - run_submit_tool_outputs_params.RunSubmitToolOutputsParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RunObject, - ) - - -class RunsResourceWithRawResponse: - def __init__(self, runs: RunsResource) -> None: - self._runs = runs - - self.create = to_raw_response_wrapper( - runs.create, - ) - self.retrieve = to_raw_response_wrapper( - runs.retrieve, - ) - self.update = to_raw_response_wrapper( - runs.update, - ) - self.list = to_raw_response_wrapper( - runs.list, - ) - self.cancel = to_raw_response_wrapper( - runs.cancel, - ) - self.create_run = to_raw_response_wrapper( - runs.create_run, - ) - self.submit_tool_outputs = to_raw_response_wrapper( - runs.submit_tool_outputs, - ) - - @cached_property - def steps(self) -> StepsResourceWithRawResponse: - return StepsResourceWithRawResponse(self._runs.steps) - - -class AsyncRunsResourceWithRawResponse: - def __init__(self, runs: AsyncRunsResource) -> None: - self._runs = runs - - self.create = async_to_raw_response_wrapper( - runs.create, - ) - self.retrieve = async_to_raw_response_wrapper( - runs.retrieve, - ) - self.update = async_to_raw_response_wrapper( - runs.update, - ) - self.list = async_to_raw_response_wrapper( - runs.list, - ) - self.cancel = async_to_raw_response_wrapper( - runs.cancel, - ) - self.create_run = async_to_raw_response_wrapper( - runs.create_run, - ) - self.submit_tool_outputs = async_to_raw_response_wrapper( - runs.submit_tool_outputs, - ) - - @cached_property - def steps(self) -> AsyncStepsResourceWithRawResponse: - return AsyncStepsResourceWithRawResponse(self._runs.steps) - - -class RunsResourceWithStreamingResponse: - def __init__(self, runs: RunsResource) -> None: - self._runs = runs - - self.create = to_streamed_response_wrapper( - runs.create, - ) - self.retrieve = to_streamed_response_wrapper( - runs.retrieve, - ) - self.update = to_streamed_response_wrapper( - runs.update, - ) - self.list = to_streamed_response_wrapper( - runs.list, - ) - self.cancel = to_streamed_response_wrapper( - runs.cancel, - ) - self.create_run = to_streamed_response_wrapper( - runs.create_run, - ) - self.submit_tool_outputs = to_streamed_response_wrapper( - runs.submit_tool_outputs, - ) - - @cached_property - def steps(self) -> StepsResourceWithStreamingResponse: - return StepsResourceWithStreamingResponse(self._runs.steps) - - -class AsyncRunsResourceWithStreamingResponse: - def __init__(self, runs: AsyncRunsResource) -> None: - self._runs = runs - - self.create = async_to_streamed_response_wrapper( - runs.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - runs.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - runs.update, - ) - self.list = async_to_streamed_response_wrapper( - runs.list, - ) - self.cancel = async_to_streamed_response_wrapper( - runs.cancel, 
- ) - self.create_run = async_to_streamed_response_wrapper( - runs.create_run, - ) - self.submit_tool_outputs = async_to_streamed_response_wrapper( - runs.submit_tool_outputs, - ) - - @cached_property - def steps(self) -> AsyncStepsResourceWithStreamingResponse: - return AsyncStepsResourceWithStreamingResponse(self._runs.steps) diff --git a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py b/src/digitalocean_genai_sdk/resources/threads/runs/steps.py deleted file mode 100644 index bf0b8749..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/runs/steps.py +++ /dev/null @@ -1,375 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.threads.runs import step_list_params, step_retrieve_params -from ....types.threads.runs.run_step_object import RunStepObject -from ....types.threads.runs.step_list_response import StepListResponse - -__all__ = ["StepsResource", "AsyncStepsResource"] - - -class StepsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> StepsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return StepsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> StepsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return StepsResourceWithStreamingResponse(self) - - def retrieve( - self, - step_id: str, - *, - thread_id: str, - run_id: str, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunStepObject: - """ - Retrieves a run step. - - Args: - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. 
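-
- A minimal usage sketch, assuming `client` is a configured instance of this
- SDK's client class (all IDs are placeholders):
-
-     step = client.threads.runs.steps.retrieve(
-         "step_abc123",
-         thread_id="thread_abc123",
-         run_id="run_abc123",
-         include=["step_details.tool_calls[*].file_search.results[*].content"],
-     )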
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - if not step_id: - raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") - return self._get( - f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), - ), - cast_to=RunStepObject, - ) - - def list( - self, - run_id: str, - *, - thread_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> StepListResponse: - """ - Returns a list of run steps belonging to a run. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. 
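-
- A minimal cursor-pagination sketch, assuming `client` is a configured
- instance of this SDK's client class and that the list response exposes a
- `data` array as in the OpenAI-compatible list schema (IDs are placeholders):
-
-     page = client.threads.runs.steps.list(
-         "run_abc123", thread_id="thread_abc123", limit=20, order="desc"
-     )
-     # Pass the last step's ID as `after` to fetch the next page.
-     next_page = client.threads.runs.steps.list(
-         "run_abc123", thread_id="thread_abc123", after=page.data[-1].id
-     )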
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return self._get( - f"/threads/{thread_id}/runs/{run_id}/steps", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "include": include, - "limit": limit, - "order": order, - }, - step_list_params.StepListParams, - ), - ), - cast_to=StepListResponse, - ) - - -class AsyncStepsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncStepsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncStepsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncStepsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncStepsResourceWithStreamingResponse(self) - - async def retrieve( - self, - step_id: str, - *, - thread_id: str, - run_id: str, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RunStepObject: - """ - Retrieves a run step. - - Args: - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - if not step_id: - raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") - return await self._get( - f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), - ), - cast_to=RunStepObject, - ) - - async def list( - self, - run_id: str, - *, - thread_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> StepListResponse: - """ - Returns a list of run steps belonging to a run. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - include: A list of additional fields to include in the response. Currently the only - supported value is `step_details.tool_calls[*].file_search.results[*].content` - to fetch the file search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - if not run_id: - raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") - return await self._get( - f"/threads/{thread_id}/runs/{run_id}/steps", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "include": include, - "limit": limit, - "order": order, - }, - step_list_params.StepListParams, - ), - ), - cast_to=StepListResponse, - ) - - -class StepsResourceWithRawResponse: - def __init__(self, steps: StepsResource) -> None: - self._steps = steps - - self.retrieve = to_raw_response_wrapper( - steps.retrieve, - ) - self.list = to_raw_response_wrapper( - steps.list, - ) - - -class AsyncStepsResourceWithRawResponse: - def __init__(self, steps: AsyncStepsResource) -> None: - self._steps = steps - - self.retrieve = async_to_raw_response_wrapper( - steps.retrieve, - ) - self.list = async_to_raw_response_wrapper( - steps.list, - ) - - -class StepsResourceWithStreamingResponse: - def __init__(self, steps: StepsResource) -> None: - self._steps = steps - - self.retrieve = to_streamed_response_wrapper( - steps.retrieve, - ) - self.list = to_streamed_response_wrapper( - steps.list, - ) - - -class AsyncStepsResourceWithStreamingResponse: - def __init__(self, steps: AsyncStepsResource) -> None: - self._steps = steps - - self.retrieve = async_to_streamed_response_wrapper( - steps.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - steps.list, - ) diff --git a/src/digitalocean_genai_sdk/resources/threads/threads.py b/src/digitalocean_genai_sdk/resources/threads/threads.py deleted file mode 100644 index 5fdf5ea8..00000000 --- a/src/digitalocean_genai_sdk/resources/threads/threads.py +++ /dev/null @@ -1,553 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Iterable, Optional - -import httpx - -from ...types import thread_create_params, thread_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from .messages import ( - MessagesResource, - AsyncMessagesResource, - MessagesResourceWithRawResponse, - AsyncMessagesResourceWithRawResponse, - MessagesResourceWithStreamingResponse, - AsyncMessagesResourceWithStreamingResponse, -) -from ..._compat import cached_property -from .runs.runs import ( - RunsResource, - AsyncRunsResource, - RunsResourceWithRawResponse, - AsyncRunsResourceWithRawResponse, - RunsResourceWithStreamingResponse, - AsyncRunsResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.thread_object import ThreadObject -from ...types.thread_delete_response import ThreadDeleteResponse -from ...types.threads.create_message_request_param import CreateMessageRequestParam - -__all__ = ["ThreadsResource", "AsyncThreadsResource"] - - -class ThreadsResource(SyncAPIResource): - @cached_property - def runs(self) -> RunsResource: - return RunsResource(self._client) - - @cached_property - def messages(self) -> MessagesResource: - return MessagesResource(self._client) - - @cached_property - def with_raw_response(self) -> ThreadsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ThreadsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ThreadsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ThreadsResourceWithStreamingResponse(self) - - def create( - self, - *, - messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Create a thread. - - Args: - messages: A list of [messages](/docs/api-reference/messages) to start the thread with. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - tool_resources: A set of resources that are made available to the assistant's tools in this - thread. 
The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/threads", - body=maybe_transform( - { - "messages": messages, - "metadata": metadata, - "tool_resources": tool_resources, - }, - thread_create_params.ThreadCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - def retrieve( - self, - thread_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Retrieves a thread. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._get( - f"/threads/{thread_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - def update( - self, - thread_id: str, - *, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Modifies a thread. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - tool_resources: A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. 
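-
- A minimal usage sketch, assuming `client` is a configured instance of this
- SDK's client class (the thread ID and metadata values are placeholders):
-
-     thread = client.threads.update(
-         "thread_abc123",
-         metadata={"user": "user_123"},
-     )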
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._post( - f"/threads/{thread_id}", - body=maybe_transform( - { - "metadata": metadata, - "tool_resources": tool_resources, - }, - thread_update_params.ThreadUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - def delete( - self, - thread_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadDeleteResponse: - """ - Delete a thread. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return self._delete( - f"/threads/{thread_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadDeleteResponse, - ) - - -class AsyncThreadsResource(AsyncAPIResource): - @cached_property - def runs(self) -> AsyncRunsResource: - return AsyncRunsResource(self._client) - - @cached_property - def messages(self) -> AsyncMessagesResource: - return AsyncMessagesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncThreadsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncThreadsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncThreadsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncThreadsResourceWithStreamingResponse(self) - - async def create( - self, - *, - messages: Iterable[CreateMessageRequestParam] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Create a thread. 
- - Args: - messages: A list of [messages](/docs/api-reference/messages) to start the thread with. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - tool_resources: A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/threads", - body=await async_maybe_transform( - { - "messages": messages, - "metadata": metadata, - "tool_resources": tool_resources, - }, - thread_create_params.ThreadCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - async def retrieve( - self, - thread_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Retrieves a thread. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._get( - f"/threads/{thread_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - async def update( - self, - thread_id: str, - *, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadObject: - """ - Modifies a thread. - - Args: - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
- - tool_resources: A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._post( - f"/threads/{thread_id}", - body=await async_maybe_transform( - { - "metadata": metadata, - "tool_resources": tool_resources, - }, - thread_update_params.ThreadUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadObject, - ) - - async def delete( - self, - thread_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ThreadDeleteResponse: - """ - Delete a thread. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not thread_id: - raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") - return await self._delete( - f"/threads/{thread_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ThreadDeleteResponse, - ) - - -class ThreadsResourceWithRawResponse: - def __init__(self, threads: ThreadsResource) -> None: - self._threads = threads - - self.create = to_raw_response_wrapper( - threads.create, - ) - self.retrieve = to_raw_response_wrapper( - threads.retrieve, - ) - self.update = to_raw_response_wrapper( - threads.update, - ) - self.delete = to_raw_response_wrapper( - threads.delete, - ) - - @cached_property - def runs(self) -> RunsResourceWithRawResponse: - return RunsResourceWithRawResponse(self._threads.runs) - - @cached_property - def messages(self) -> MessagesResourceWithRawResponse: - return MessagesResourceWithRawResponse(self._threads.messages) - - -class AsyncThreadsResourceWithRawResponse: - def __init__(self, threads: AsyncThreadsResource) -> None: - self._threads = threads - - self.create = async_to_raw_response_wrapper( - threads.create, - ) - self.retrieve = async_to_raw_response_wrapper( - threads.retrieve, - ) - self.update = async_to_raw_response_wrapper( - threads.update, - ) - self.delete = async_to_raw_response_wrapper( - threads.delete, - ) - - @cached_property - def runs(self) -> AsyncRunsResourceWithRawResponse: - return AsyncRunsResourceWithRawResponse(self._threads.runs) - - @cached_property - def messages(self) -> AsyncMessagesResourceWithRawResponse: - return AsyncMessagesResourceWithRawResponse(self._threads.messages) - - -class ThreadsResourceWithStreamingResponse: - def 
__init__(self, threads: ThreadsResource) -> None: - self._threads = threads - - self.create = to_streamed_response_wrapper( - threads.create, - ) - self.retrieve = to_streamed_response_wrapper( - threads.retrieve, - ) - self.update = to_streamed_response_wrapper( - threads.update, - ) - self.delete = to_streamed_response_wrapper( - threads.delete, - ) - - @cached_property - def runs(self) -> RunsResourceWithStreamingResponse: - return RunsResourceWithStreamingResponse(self._threads.runs) - - @cached_property - def messages(self) -> MessagesResourceWithStreamingResponse: - return MessagesResourceWithStreamingResponse(self._threads.messages) - - -class AsyncThreadsResourceWithStreamingResponse: - def __init__(self, threads: AsyncThreadsResource) -> None: - self._threads = threads - - self.create = async_to_streamed_response_wrapper( - threads.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - threads.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - threads.update, - ) - self.delete = async_to_streamed_response_wrapper( - threads.delete, - ) - - @cached_property - def runs(self) -> AsyncRunsResourceWithStreamingResponse: - return AsyncRunsResourceWithStreamingResponse(self._threads.runs) - - @cached_property - def messages(self) -> AsyncMessagesResourceWithStreamingResponse: - return AsyncMessagesResourceWithStreamingResponse(self._threads.messages) diff --git a/src/digitalocean_genai_sdk/resources/uploads.py b/src/digitalocean_genai_sdk/resources/uploads.py deleted file mode 100644 index d36ad23e..00000000 --- a/src/digitalocean_genai_sdk/resources/uploads.py +++ /dev/null @@ -1,573 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Mapping, cast -from typing_extensions import Literal - -import httpx - -from ..types import upload_create_params, upload_add_part_params, upload_complete_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.upload import Upload -from ..types.upload_add_part_response import UploadAddPartResponse - -__all__ = ["UploadsResource", "AsyncUploadsResource"] - - -class UploadsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> UploadsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return UploadsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> UploadsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return UploadsResourceWithStreamingResponse(self) - - def create( - self, - *, - bytes: int, - filename: str, - mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """ - Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that - you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an - Upload can accept at most 8 GB in total and expires an hour after you - create it. - - Once you complete the Upload, we will create a - [File](/docs/api-reference/files/object) object that contains all the parts you - uploaded. This File is usable in the rest of our platform as a regular File - object. - - For certain `purpose` values, the correct `mime_type` must be specified. Please - refer to the documentation for the - [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). - - For guidance on the proper filename extensions for each purpose, please follow - the documentation on [creating a File](/docs/api-reference/files/create). - - Args: - bytes: The number of bytes in the file you are uploading. - - filename: The name of the file to upload. - - mime_type: The MIME type of the file. - - This must fall within the supported MIME types for your file purpose. See the - supported MIME types for assistants and vision. - - purpose: The intended purpose of the uploaded file. - - See the - [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose). - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/uploads", - body=maybe_transform( - { - "bytes": bytes, - "filename": filename, - "mime_type": mime_type, - "purpose": purpose, - }, - upload_create_params.UploadCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - def add_part( - self, - upload_id: str, - *, - data: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UploadAddPartResponse: - """ - Adds a [Part](/docs/api-reference/uploads/part-object) to an - [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk - of bytes from the file you are trying to upload. - - Each Part can be at most 64 MB, and you can add Parts until you hit the Upload - maximum of 8 GB. - - It is possible to add multiple Parts in parallel.
You can decide the intended - order of the Parts when you - [complete the Upload](/docs/api-reference/uploads/complete). - - Args: - data: The chunk of bytes for this Part. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - body = deepcopy_minimal({"data": data}) - files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - f"/uploads/{upload_id}/parts", - body=maybe_transform(body, upload_add_part_params.UploadAddPartParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UploadAddPartResponse, - ) - - def cancel( - self, - upload_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """Cancels the Upload. - - No Parts may be added after an Upload is cancelled. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - return self._post( - f"/uploads/{upload_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - def complete( - self, - upload_id: str, - *, - part_ids: List[str], - md5: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """ - Completes the [Upload](/docs/api-reference/uploads/object). - - Within the returned Upload object, there is a nested - [File](/docs/api-reference/files/object) object that is ready to use in the rest - of the platform. - - You can specify the order of the Parts by passing in an ordered list of the Part - IDs. - - The number of bytes uploaded upon completion must match the number of bytes - initially specified when creating the Upload object. No Parts may be added after - an Upload is completed. - - Args: - part_ids: The ordered list of Part IDs. 
- - md5: The optional md5 checksum for the file contents to verify if the bytes uploaded - match what you expect. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - return self._post( - f"/uploads/{upload_id}/complete", - body=maybe_transform( - { - "part_ids": part_ids, - "md5": md5, - }, - upload_complete_params.UploadCompleteParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - -class AsyncUploadsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncUploadsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncUploadsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncUploadsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncUploadsResourceWithStreamingResponse(self) - - async def create( - self, - *, - bytes: int, - filename: str, - mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """ - Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that - you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an - Upload can accept at most 8 GB in total and expires an hour after you - create it. - - Once you complete the Upload, we will create a - [File](/docs/api-reference/files/object) object that contains all the parts you - uploaded. This File is usable in the rest of our platform as a regular File - object. - - For certain `purpose` values, the correct `mime_type` must be specified. Please - refer to the documentation for the - [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). - - For guidance on the proper filename extensions for each purpose, please follow - the documentation on [creating a File](/docs/api-reference/files/create). - - Args: - bytes: The number of bytes in the file you are uploading. - - filename: The name of the file to upload. - - mime_type: The MIME type of the file. - - This must fall within the supported MIME types for your file purpose. See the - supported MIME types for assistants and vision. - - purpose: The intended purpose of the uploaded file. - - See the - [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/uploads", - body=await async_maybe_transform( - { - "bytes": bytes, - "filename": filename, - "mime_type": mime_type, - "purpose": purpose, - }, - upload_create_params.UploadCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - async def add_part( - self, - upload_id: str, - *, - data: FileTypes, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> UploadAddPartResponse: - """ - Adds a [Part](/docs/api-reference/uploads/part-object) to an - [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk - of bytes from the file you are trying to upload. - - Each Part can be at most 64 MB, and you can add Parts until you hit the Upload - maximum of 8 GB. - - It is possible to add multiple Parts in parallel. You can decide the intended - order of the Parts when you - [complete the Upload](/docs/api-reference/uploads/complete). - - Args: - data: The chunk of bytes for this Part. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - body = deepcopy_minimal({"data": data}) - files = extract_files(cast(Mapping[str, object], body), paths=[["data"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - f"/uploads/{upload_id}/parts", - body=await async_maybe_transform(body, upload_add_part_params.UploadAddPartParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=UploadAddPartResponse, - ) - - async def cancel( - self, - upload_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """Cancels the Upload. - - No Parts may be added after an Upload is cancelled. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - return await self._post( - f"/uploads/{upload_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - async def complete( - self, - upload_id: str, - *, - part_ids: List[str], - md5: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Upload: - """ - Completes the [Upload](/docs/api-reference/uploads/object). - - Within the returned Upload object, there is a nested - [File](/docs/api-reference/files/object) object that is ready to use in the rest - of the platform. - - You can specify the order of the Parts by passing in an ordered list of the Part - IDs. - - The number of bytes uploaded upon completion must match the number of bytes - initially specified when creating the Upload object. No Parts may be added after - an Upload is completed. - - Args: - part_ids: The ordered list of Part IDs. - - md5: The optional md5 checksum for the file contents to verify if the bytes uploaded - match what you expect.
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not upload_id: - raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}") - return await self._post( - f"/uploads/{upload_id}/complete", - body=await async_maybe_transform( - { - "part_ids": part_ids, - "md5": md5, - }, - upload_complete_params.UploadCompleteParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Upload, - ) - - -class UploadsResourceWithRawResponse: - def __init__(self, uploads: UploadsResource) -> None: - self._uploads = uploads - - self.create = to_raw_response_wrapper( - uploads.create, - ) - self.add_part = to_raw_response_wrapper( - uploads.add_part, - ) - self.cancel = to_raw_response_wrapper( - uploads.cancel, - ) - self.complete = to_raw_response_wrapper( - uploads.complete, - ) - - -class AsyncUploadsResourceWithRawResponse: - def __init__(self, uploads: AsyncUploadsResource) -> None: - self._uploads = uploads - - self.create = async_to_raw_response_wrapper( - uploads.create, - ) - self.add_part = async_to_raw_response_wrapper( - uploads.add_part, - ) - self.cancel = async_to_raw_response_wrapper( - uploads.cancel, - ) - self.complete = async_to_raw_response_wrapper( - uploads.complete, - ) - - -class UploadsResourceWithStreamingResponse: - def __init__(self, uploads: UploadsResource) -> None: - self._uploads = uploads - - self.create = to_streamed_response_wrapper( - uploads.create, - ) - self.add_part = to_streamed_response_wrapper( - uploads.add_part, - ) - self.cancel = to_streamed_response_wrapper( - uploads.cancel, - ) - self.complete = to_streamed_response_wrapper( - uploads.complete, - ) - - -class AsyncUploadsResourceWithStreamingResponse: - def __init__(self, uploads: AsyncUploadsResource) -> None: - self._uploads = uploads - - self.create = async_to_streamed_response_wrapper( - uploads.create, - ) - self.add_part = async_to_streamed_response_wrapper( - uploads.add_part, - ) - self.cancel = async_to_streamed_response_wrapper( - uploads.cancel, - ) - self.complete = async_to_streamed_response_wrapper( - uploads.complete, - ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py b/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py deleted file mode 100644 index a754f147..00000000 --- a/src/digitalocean_genai_sdk/resources/vector_stores/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
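The uploads resource above forms a three-step flow: create an Upload, add one or more Parts, then complete it with an ordered list of Part IDs. The following is a minimal sketch of that flow, assuming an already-configured client bound to `client`, that the returned `Upload` and `UploadAddPartResponse` objects expose an `id` field, and an illustrative file path and MIME type (none of these are confirmed by this patch):

    # Sketch only: create -> add_part -> complete, under the assumptions above.
    from pathlib import Path

    path = Path("training.jsonl")  # hypothetical local file
    data = path.read_bytes()

    upload = client.uploads.create(
        bytes=len(data),         # declared size must match the bytes uploaded
        filename=path.name,
        mime_type="text/jsonl",  # must be valid for the chosen purpose
        purpose="fine-tune",
    )

    # Each Part can be at most 64 MB; one part suffices for a small file,
    # and multiple parts may be uploaded in parallel for larger ones.
    part = client.uploads.add_part(upload.id, data=data)

    # The ordered part_ids list fixes the byte order at completion time.
    completed = client.uploads.complete(upload.id, part_ids=[part.id])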
- -from .files import ( - FilesResource, - AsyncFilesResource, - FilesResourceWithRawResponse, - AsyncFilesResourceWithRawResponse, - FilesResourceWithStreamingResponse, - AsyncFilesResourceWithStreamingResponse, -) -from .file_batches import ( - FileBatchesResource, - AsyncFileBatchesResource, - FileBatchesResourceWithRawResponse, - AsyncFileBatchesResourceWithRawResponse, - FileBatchesResourceWithStreamingResponse, - AsyncFileBatchesResourceWithStreamingResponse, -) -from .vector_stores import ( - VectorStoresResource, - AsyncVectorStoresResource, - VectorStoresResourceWithRawResponse, - AsyncVectorStoresResourceWithRawResponse, - VectorStoresResourceWithStreamingResponse, - AsyncVectorStoresResourceWithStreamingResponse, -) - -__all__ = [ - "FileBatchesResource", - "AsyncFileBatchesResource", - "FileBatchesResourceWithRawResponse", - "AsyncFileBatchesResourceWithRawResponse", - "FileBatchesResourceWithStreamingResponse", - "AsyncFileBatchesResourceWithStreamingResponse", - "FilesResource", - "AsyncFilesResource", - "FilesResourceWithRawResponse", - "AsyncFilesResourceWithRawResponse", - "FilesResourceWithStreamingResponse", - "AsyncFilesResourceWithStreamingResponse", - "VectorStoresResource", - "AsyncVectorStoresResource", - "VectorStoresResourceWithRawResponse", - "AsyncVectorStoresResourceWithRawResponse", - "VectorStoresResourceWithStreamingResponse", - "AsyncVectorStoresResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py b/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py deleted file mode 100644 index d6dba5f9..00000000 --- a/src/digitalocean_genai_sdk/resources/vector_stores/file_batches.py +++ /dev/null @@ -1,544 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.vector_stores import file_batch_create_params, file_batch_list_files_params -from ...types.vector_stores.vector_store_file_batch_object import VectorStoreFileBatchObject -from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam -from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse - -__all__ = ["FileBatchesResource", "AsyncFileBatchesResource"] - - -class FileBatchesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> FileBatchesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FileBatchesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FileBatchesResourceWithStreamingResponse(self) - - def create( - self, - vector_store_id: str, - *, - file_ids: List[str], - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """ - Create a vector store file batch. - - Args: - file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should - use. Useful for tools like `file_search` that can access files. - - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters, booleans, or numbers. - - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}/file_batches", - body=maybe_transform( - { - "file_ids": file_ids, - "attributes": attributes, - "chunking_strategy": chunking_strategy, - }, - file_batch_create_params.FileBatchCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - def retrieve( - self, - batch_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """ - Retrieves a vector store file batch. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - def cancel( - self, - batch_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """Cancel a vector store file batch. - - This attempts to cancel the processing of - files in this batch as soon as possible. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - def list_files( - self, - batch_id: str, - *, - vector_store_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoreFilesResponse: - """ - Returns a list of vector store files in a batch. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "filter": filter, - "limit": limit, - "order": order, - }, - file_batch_list_files_params.FileBatchListFilesParams, - ), - ), - cast_to=ListVectorStoreFilesResponse, - ) - - -class AsyncFileBatchesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFileBatchesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFileBatchesResourceWithStreamingResponse(self) - - async def create( - self, - vector_store_id: str, - *, - file_ids: List[str], - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """ - Create a vector store file batch. - - Args: - file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should - use. Useful for tools like `file_search` that can access files. - - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. 
Values are strings with a maximum length of 512 - characters, booleans, or numbers. - - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}/file_batches", - body=await async_maybe_transform( - { - "file_ids": file_ids, - "attributes": attributes, - "chunking_strategy": chunking_strategy, - }, - file_batch_create_params.FileBatchCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - async def retrieve( - self, - batch_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """ - Retrieves a vector store file batch. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - async def cancel( - self, - batch_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileBatchObject: - """Cancel a vector store file batch. - - This attempts to cancel the processing of - files in this batch as soon as possible. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileBatchObject, - ) - - async def list_files( - self, - batch_id: str, - *, - vector_store_id: str, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoreFilesResponse: - """ - Returns a list of vector store files in a batch. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}/file_batches/{batch_id}/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "filter": filter, - "limit": limit, - "order": order, - }, - file_batch_list_files_params.FileBatchListFilesParams, - ), - ), - cast_to=ListVectorStoreFilesResponse, - ) - - -class FileBatchesResourceWithRawResponse: - def __init__(self, file_batches: FileBatchesResource) -> None: - self._file_batches = file_batches - - self.create = to_raw_response_wrapper( - file_batches.create, - ) - self.retrieve = to_raw_response_wrapper( - file_batches.retrieve, - ) - self.cancel = to_raw_response_wrapper( - file_batches.cancel, - ) - self.list_files = to_raw_response_wrapper( - file_batches.list_files, - ) - - -class AsyncFileBatchesResourceWithRawResponse: - def __init__(self, file_batches: AsyncFileBatchesResource) -> None: - self._file_batches = file_batches - - self.create = async_to_raw_response_wrapper( - file_batches.create, - ) - self.retrieve = async_to_raw_response_wrapper( - file_batches.retrieve, - ) - self.cancel = async_to_raw_response_wrapper( - file_batches.cancel, - ) - self.list_files = async_to_raw_response_wrapper( - file_batches.list_files, - ) - - -class FileBatchesResourceWithStreamingResponse: - def __init__(self, file_batches: FileBatchesResource) -> None: - self._file_batches = file_batches - - self.create = to_streamed_response_wrapper( - file_batches.create, - ) - self.retrieve = to_streamed_response_wrapper( - file_batches.retrieve, - ) - self.cancel = to_streamed_response_wrapper( - file_batches.cancel, - ) - self.list_files = to_streamed_response_wrapper( - file_batches.list_files, - ) - - -class AsyncFileBatchesResourceWithStreamingResponse: - def __init__(self, file_batches: AsyncFileBatchesResource) -> None: - self._file_batches = file_batches - - self.create = async_to_streamed_response_wrapper( - file_batches.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - file_batches.retrieve, - ) - self.cancel = async_to_streamed_response_wrapper( - file_batches.cancel, - ) - self.list_files = async_to_streamed_response_wrapper( - file_batches.list_files, - ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/files.py b/src/digitalocean_genai_sdk/resources/vector_stores/files.py deleted file mode 100644 index 84576820..00000000 --- a/src/digitalocean_genai_sdk/resources/vector_stores/files.py +++ /dev/null @@ -1,733 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
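The file-batch methods above compose into a create/poll/list workflow. The following is a minimal sketch, assuming a configured client bound to `client` that exposes the resource as `client.vector_stores.file_batches`, illustrative IDs, and that the returned batch object carries `id` and `status` fields with "in_progress" as its pending state (assumptions; the batch model itself is not shown in this patch):

    # Sketch only: create a batch, poll until it settles, then list its files.
    import time

    vs_id = "vs_123"  # hypothetical vector store ID
    batch = client.vector_stores.file_batches.create(
        vs_id,
        file_ids=["file_abc", "file_def"],  # hypothetical File IDs
    )

    while batch.status == "in_progress":  # assumed status field and value
        time.sleep(1)
        batch = client.vector_stores.file_batches.retrieve(batch.id, vector_store_id=vs_id)

    # list_files pages with `after`/`before` cursors; a single page is shown here.
    files_page = client.vector_stores.file_batches.list_files(
        batch.id, vector_store_id=vs_id, limit=100, order="asc"
    )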
- -from __future__ import annotations - -from typing import Dict, Union, Optional -from typing_extensions import Literal - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.vector_stores import ( - file_list_params, - file_create_params, - file_update_params, -) -from ...types.vector_stores.file_delete_response import FileDeleteResponse -from ...types.vector_stores.vector_store_file_object import VectorStoreFileObject -from ...types.vector_stores.file_retrieve_content_response import FileRetrieveContentResponse -from ...types.vector_stores.chunking_strategy_request_param import ChunkingStrategyRequestParam -from ...types.vector_stores.list_vector_store_files_response import ListVectorStoreFilesResponse - -__all__ = ["FilesResource", "AsyncFilesResource"] - - -class FilesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> FilesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FilesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FilesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FilesResourceWithStreamingResponse(self) - - def create( - self, - vector_store_id: str, - *, - file_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Create a vector store file by attaching a [File](/docs/api-reference/files) to a - [vector store](/docs/api-reference/vector-stores/object). - - Args: - file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful - for tools like `file_search` that can access files. - - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters, booleans, or numbers. - - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy.
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}/files", - body=maybe_transform( - { - "file_id": file_id, - "attributes": attributes, - "chunking_strategy": chunking_strategy, - }, - file_create_params.FileCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - def retrieve( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Retrieves a vector store file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - def update( - self, - file_id: str, - *, - vector_store_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Update attributes on a vector store file. - - Args: - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters, booleans, or numbers. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}/files/{file_id}", - body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - def list( - self, - vector_store_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoreFilesResponse: - """ - Returns a list of vector store files. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "filter": filter, - "limit": limit, - "order": order, - }, - file_list_params.FileListParams, - ), - ), - cast_to=ListVectorStoreFilesResponse, - ) - - def delete( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileDeleteResponse: - """Delete a vector store file. - - This will remove the file from the vector store but - the file itself will not be deleted. To delete the file, use the - [delete file](/docs/api-reference/files/delete) endpoint. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._delete( - f"/vector_stores/{vector_store_id}/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileDeleteResponse, - ) - - def retrieve_content( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileRetrieveContentResponse: - """ - Retrieve the parsed contents of a vector store file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}/files/{file_id}/content", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileRetrieveContentResponse, - ) - - -class AsyncFilesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncFilesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFilesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFilesResourceWithStreamingResponse(self) - - async def create( - self, - vector_store_id: str, - *, - file_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, - chunking_strategy: ChunkingStrategyRequestParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Create a vector store file by attaching a [File](/docs/api-reference/files) to a - [vector store](/docs/api-reference/vector-stores/object). - - Args: - file_id: A [File](/docs/api-reference/files) ID that the vector store should use. Useful - for tools like `file_search` that can access files. - - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters, booleans, or numbers. - - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}/files", - body=await async_maybe_transform( - { - "file_id": file_id, - "attributes": attributes, - "chunking_strategy": chunking_strategy, - }, - file_create_params.FileCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - async def retrieve( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Retrieves a vector store file. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - async def update( - self, - file_id: str, - *, - vector_store_id: str, - attributes: Optional[Dict[str, Union[str, float, bool]]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreFileObject: - """ - Update attributes on a vector store file. - - Args: - attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. Keys are strings with a maximum - length of 64 characters. Values are strings with a maximum length of 512 - characters, booleans, or numbers. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}/files/{file_id}", - body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreFileObject, - ) - - async def list( - self, - vector_store_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - filter: Literal["in_progress", "completed", "failed", "cancelled"] | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListVectorStoreFilesResponse: - """ - Returns a list of vector store files. - - Args: - after: A cursor for use in pagination. `after` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}/files", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "filter": filter, - "limit": limit, - "order": order, - }, - file_list_params.FileListParams, - ), - ), - cast_to=ListVectorStoreFilesResponse, - ) - - async def delete( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileDeleteResponse: - """Delete a vector store file. - - This will remove the file from the vector store but - the file itself will not be deleted. To delete the file, use the - [delete file](/docs/api-reference/files/delete) endpoint. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._delete( - f"/vector_stores/{vector_store_id}/files/{file_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileDeleteResponse, - ) - - async def retrieve_content( - self, - file_id: str, - *, - vector_store_id: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileRetrieveContentResponse: - """ - Retrieve the parsed contents of a vector store file. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}/files/{file_id}/content", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FileRetrieveContentResponse, - ) - - -class FilesResourceWithRawResponse: - def __init__(self, files: FilesResource) -> None: - self._files = files - - self.create = to_raw_response_wrapper( - files.create, - ) - self.retrieve = to_raw_response_wrapper( - files.retrieve, - ) - self.update = to_raw_response_wrapper( - files.update, - ) - self.list = to_raw_response_wrapper( - files.list, - ) - self.delete = to_raw_response_wrapper( - files.delete, - ) - self.retrieve_content = to_raw_response_wrapper( - files.retrieve_content, - ) - - -class AsyncFilesResourceWithRawResponse: - def __init__(self, files: AsyncFilesResource) -> None: - self._files = files - - self.create = async_to_raw_response_wrapper( - files.create, - ) - self.retrieve = async_to_raw_response_wrapper( - files.retrieve, - ) - self.update = async_to_raw_response_wrapper( - files.update, - ) - self.list = async_to_raw_response_wrapper( - files.list, - ) - self.delete = async_to_raw_response_wrapper( - files.delete, - ) - self.retrieve_content = async_to_raw_response_wrapper( - files.retrieve_content, - ) - - -class FilesResourceWithStreamingResponse: - def __init__(self, files: FilesResource) -> None: - self._files = files - - self.create = to_streamed_response_wrapper( - files.create, - ) - self.retrieve = to_streamed_response_wrapper( - files.retrieve, - ) - self.update = to_streamed_response_wrapper( - files.update, - ) - self.list = to_streamed_response_wrapper( - files.list, - ) - self.delete = to_streamed_response_wrapper( - files.delete, - ) - self.retrieve_content = to_streamed_response_wrapper( - files.retrieve_content, - ) - - -class AsyncFilesResourceWithStreamingResponse: - def __init__(self, files: AsyncFilesResource) -> None: - self._files = files - - self.create = async_to_streamed_response_wrapper( - files.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - files.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - files.update, - ) - self.list = async_to_streamed_response_wrapper( - files.list, - ) - self.delete = async_to_streamed_response_wrapper( - files.delete, - ) - self.retrieve_content = async_to_streamed_response_wrapper( - files.retrieve_content, - ) diff --git a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py b/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py deleted file mode 100644 index 0e80da49..00000000 --- a/src/digitalocean_genai_sdk/resources/vector_stores/vector_stores.py +++ /dev/null @@ -1,847 +0,0 @@ -# File generated from our OpenAPI 
spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -import httpx - -from .files import ( - FilesResource, - AsyncFilesResource, - FilesResourceWithRawResponse, - AsyncFilesResourceWithRawResponse, - FilesResourceWithStreamingResponse, - AsyncFilesResourceWithStreamingResponse, -) -from ...types import ( - vector_store_list_params, - vector_store_create_params, - vector_store_search_params, - vector_store_update_params, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .file_batches import ( - FileBatchesResource, - AsyncFileBatchesResource, - FileBatchesResourceWithRawResponse, - AsyncFileBatchesResourceWithRawResponse, - FileBatchesResourceWithStreamingResponse, - AsyncFileBatchesResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from ...types.vector_store_object import VectorStoreObject -from ...types.vector_store_list_response import VectorStoreListResponse -from ...types.vector_store_delete_response import VectorStoreDeleteResponse -from ...types.vector_store_search_response import VectorStoreSearchResponse -from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam - -__all__ = ["VectorStoresResource", "AsyncVectorStoresResource"] - - -class VectorStoresResource(SyncAPIResource): - @cached_property - def file_batches(self) -> FileBatchesResource: - return FileBatchesResource(self._client) - - @cached_property - def files(self) -> FilesResource: - return FilesResource(self._client) - - @cached_property - def with_raw_response(self) -> VectorStoresResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return VectorStoresResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return VectorStoresResourceWithStreamingResponse(self) - - def create( - self, - *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, - expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Create a vector store. 
- - Args: - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. Only applicable if `file_ids` is non-empty. - - expires_after: The expiration policy for a vector store. - - file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should - use. Useful for tools like `file_search` that can access files. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the vector store. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/vector_stores", - body=maybe_transform( - { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - }, - vector_store_create_params.VectorStoreCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - def retrieve( - self, - vector_store_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Retrieves a vector store. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._get( - f"/vector_stores/{vector_store_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - def update( - self, - vector_store_id: str, - *, - expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Modifies a vector store. - - Args: - expires_after: The expiration policy for a vector store. - - metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the vector store. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}", - body=maybe_transform( - { - "expires_after": expires_after, - "metadata": metadata, - "name": name, - }, - vector_store_update_params.VectorStoreUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreListResponse: - """Returns a list of vector stores. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/vector_stores", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - vector_store_list_params.VectorStoreListParams, - ), - ), - cast_to=VectorStoreListResponse, - ) - - def delete( - self, - vector_store_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreDeleteResponse: - """ - Delete a vector store. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._delete( - f"/vector_stores/{vector_store_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreDeleteResponse, - ) - - def search( - self, - vector_store_id: str, - *, - query: Union[str, List[str]], - filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, - max_num_results: int | NotGiven = NOT_GIVEN, - ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, - rewrite_query: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreSearchResponse: - """ - Search a vector store for relevant chunks based on a query and file attributes - filter. - - Args: - query: A query string for a search - - filters: A filter to apply based on file attributes. - - max_num_results: The maximum number of results to return. This number should be between 1 and 50 - inclusive. - - ranking_options: Ranking options for search. - - rewrite_query: Whether to rewrite the natural language query for vector search. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return self._post( - f"/vector_stores/{vector_store_id}/search", - body=maybe_transform( - { - "query": query, - "filters": filters, - "max_num_results": max_num_results, - "ranking_options": ranking_options, - "rewrite_query": rewrite_query, - }, - vector_store_search_params.VectorStoreSearchParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreSearchResponse, - ) - - -class AsyncVectorStoresResource(AsyncAPIResource): - @cached_property - def file_batches(self) -> AsyncFileBatchesResource: - return AsyncFileBatchesResource(self._client) - - @cached_property - def files(self) -> AsyncFilesResource: - return AsyncFilesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncVectorStoresResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncVectorStoresResourceWithStreamingResponse(self) - - async def create( - self, - *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, - expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Create a vector store. - - Args: - chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. Only applicable if `file_ids` is non-empty. - - expires_after: The expiration policy for a vector store. - - file_ids: A list of [File](/docs/api-reference/files) IDs that the vector store should - use. Useful for tools like `file_search` that can access files. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the vector store. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/vector_stores", - body=await async_maybe_transform( - { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - }, - vector_store_create_params.VectorStoreCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - async def retrieve( - self, - vector_store_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Retrieves a vector store. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._get( - f"/vector_stores/{vector_store_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - async def update( - self, - vector_store_id: str, - *, - expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - name: Optional[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreObject: - """ - Modifies a vector store. - - Args: - expires_after: The expiration policy for a vector store. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - name: The name of the vector store. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}", - body=await async_maybe_transform( - { - "expires_after": expires_after, - "metadata": metadata, - "name": name, - }, - vector_store_update_params.VectorStoreUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreObject, - ) - - async def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreListResponse: - """Returns a list of vector stores. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - before: A cursor for use in pagination. `before` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. - - limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. - - order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/vector_stores", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "after": after, - "before": before, - "limit": limit, - "order": order, - }, - vector_store_list_params.VectorStoreListParams, - ), - ), - cast_to=VectorStoreListResponse, - ) - - async def delete( - self, - vector_store_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreDeleteResponse: - """ - Delete a vector store. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._delete( - f"/vector_stores/{vector_store_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreDeleteResponse, - ) - - async def search( - self, - vector_store_id: str, - *, - query: Union[str, List[str]], - filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, - max_num_results: int | NotGiven = NOT_GIVEN, - ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, - rewrite_query: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VectorStoreSearchResponse: - """ - Search a vector store for relevant chunks based on a query and file attributes - filter. - - Args: - query: A query string for a search - - filters: A filter to apply based on file attributes. 
- - max_num_results: The maximum number of results to return. This number should be between 1 and 50 - inclusive. - - ranking_options: Ranking options for search. - - rewrite_query: Whether to rewrite the natural language query for vector search. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not vector_store_id: - raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}") - return await self._post( - f"/vector_stores/{vector_store_id}/search", - body=await async_maybe_transform( - { - "query": query, - "filters": filters, - "max_num_results": max_num_results, - "ranking_options": ranking_options, - "rewrite_query": rewrite_query, - }, - vector_store_search_params.VectorStoreSearchParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VectorStoreSearchResponse, - ) - - -class VectorStoresResourceWithRawResponse: - def __init__(self, vector_stores: VectorStoresResource) -> None: - self._vector_stores = vector_stores - - self.create = to_raw_response_wrapper( - vector_stores.create, - ) - self.retrieve = to_raw_response_wrapper( - vector_stores.retrieve, - ) - self.update = to_raw_response_wrapper( - vector_stores.update, - ) - self.list = to_raw_response_wrapper( - vector_stores.list, - ) - self.delete = to_raw_response_wrapper( - vector_stores.delete, - ) - self.search = to_raw_response_wrapper( - vector_stores.search, - ) - - @cached_property - def file_batches(self) -> FileBatchesResourceWithRawResponse: - return FileBatchesResourceWithRawResponse(self._vector_stores.file_batches) - - @cached_property - def files(self) -> FilesResourceWithRawResponse: - return FilesResourceWithRawResponse(self._vector_stores.files) - - -class AsyncVectorStoresResourceWithRawResponse: - def __init__(self, vector_stores: AsyncVectorStoresResource) -> None: - self._vector_stores = vector_stores - - self.create = async_to_raw_response_wrapper( - vector_stores.create, - ) - self.retrieve = async_to_raw_response_wrapper( - vector_stores.retrieve, - ) - self.update = async_to_raw_response_wrapper( - vector_stores.update, - ) - self.list = async_to_raw_response_wrapper( - vector_stores.list, - ) - self.delete = async_to_raw_response_wrapper( - vector_stores.delete, - ) - self.search = async_to_raw_response_wrapper( - vector_stores.search, - ) - - @cached_property - def file_batches(self) -> AsyncFileBatchesResourceWithRawResponse: - return AsyncFileBatchesResourceWithRawResponse(self._vector_stores.file_batches) - - @cached_property - def files(self) -> AsyncFilesResourceWithRawResponse: - return AsyncFilesResourceWithRawResponse(self._vector_stores.files) - - -class VectorStoresResourceWithStreamingResponse: - def __init__(self, vector_stores: VectorStoresResource) -> None: - self._vector_stores = vector_stores - - self.create = to_streamed_response_wrapper( - vector_stores.create, - ) - self.retrieve = to_streamed_response_wrapper( - vector_stores.retrieve, - ) - self.update = to_streamed_response_wrapper( - vector_stores.update, - ) - self.list = to_streamed_response_wrapper( - vector_stores.list, - ) - self.delete = to_streamed_response_wrapper( - vector_stores.delete, - ) - self.search = to_streamed_response_wrapper( - vector_stores.search, - ) - - 
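    # Editorial note, not generated code: every resource above is mirrored by raw
    # and streaming wrapper classes so callers can reach HTTP-level details. A
    # hedged usage sketch, assuming the standard Stainless wrapper API (`.parse()`
    # and friends) that the README links in the docstrings above describe:
    #
    #     response = client.vector_stores.with_raw_response.retrieve("vs_abc123")
    #     print(response.headers.get("x-request-id"))  # headers plus eagerly read body
    #     store = response.parse()                     # then the typed VectorStoreObject
    #
    #     with client.vector_stores.with_streaming_response.list() as response:
    #         stores = response.parse()  # body is only read here, not eagerly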
@cached_property - def file_batches(self) -> FileBatchesResourceWithStreamingResponse: - return FileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches) - - @cached_property - def files(self) -> FilesResourceWithStreamingResponse: - return FilesResourceWithStreamingResponse(self._vector_stores.files) - - -class AsyncVectorStoresResourceWithStreamingResponse: - def __init__(self, vector_stores: AsyncVectorStoresResource) -> None: - self._vector_stores = vector_stores - - self.create = async_to_streamed_response_wrapper( - vector_stores.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - vector_stores.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - vector_stores.update, - ) - self.list = async_to_streamed_response_wrapper( - vector_stores.list, - ) - self.delete = async_to_streamed_response_wrapper( - vector_stores.delete, - ) - self.search = async_to_streamed_response_wrapper( - vector_stores.search, - ) - - @cached_property - def file_batches(self) -> AsyncFileBatchesResourceWithStreamingResponse: - return AsyncFileBatchesResourceWithStreamingResponse(self._vector_stores.file_batches) - - @cached_property - def files(self) -> AsyncFilesResourceWithStreamingResponse: - return AsyncFilesResourceWithStreamingResponse(self._vector_stores.files) diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py index 49c8d424..144bfd42 100644 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -2,135 +2,9 @@ from __future__ import annotations -from .batch import Batch as Batch from .model import Model as Model -from .upload import Upload as Upload -from .response import Response as Response -from .includable import Includable as Includable -from .openai_file import OpenAIFile as OpenAIFile -from .input_content import InputContent as InputContent -from .input_message import InputMessage as InputMessage -from .thread_object import ThreadObject as ThreadObject -from .output_message import OutputMessage as OutputMessage -from .reasoning_item import ReasoningItem as ReasoningItem -from .usage_response import UsageResponse as UsageResponse -from .compound_filter import CompoundFilter as CompoundFilter -from .function_object import FunctionObject as FunctionObject -from .images_response import ImagesResponse as ImagesResponse -from .assistant_object import AssistantObject as AssistantObject -from .file_list_params import FileListParams as FileListParams -from .reasoning_effort import ReasoningEffort as ReasoningEffort -from .voice_ids_shared import VoiceIDsShared as VoiceIDsShared -from .batch_list_params import BatchListParams as BatchListParams -from .comparison_filter import ComparisonFilter as ComparisonFilter -from .computer_tool_call import ComputerToolCall as ComputerToolCall -from .file_list_response import FileListResponse as FileListResponse -from .file_search_ranker import FileSearchRanker as FileSearchRanker -from .file_upload_params import FileUploadParams as FileUploadParams -from .function_tool_call import FunctionToolCall as FunctionToolCall -from .batch_create_params import BatchCreateParams as BatchCreateParams -from .batch_list_response import BatchListResponse as BatchListResponse -from .input_content_param import InputContentParam as InputContentParam -from .input_message_param import InputMessageParam as InputMessageParam from .model_list_response import ModelListResponse as ModelListResponse -from .response_properties import 
ResponseProperties as ResponseProperties -from .vector_store_object import VectorStoreObject as VectorStoreObject -from .assistant_tools_code import AssistantToolsCode as AssistantToolsCode -from .audit_log_actor_user import AuditLogActorUser as AuditLogActorUser -from .audit_log_event_type import AuditLogEventType as AuditLogEventType -from .file_delete_response import FileDeleteResponse as FileDeleteResponse -from .output_message_param import OutputMessageParam as OutputMessageParam -from .reasoning_item_param import ReasoningItemParam as ReasoningItemParam -from .thread_create_params import ThreadCreateParams as ThreadCreateParams -from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams -from .upload_create_params import UploadCreateParams as UploadCreateParams -from .web_search_tool_call import WebSearchToolCall as WebSearchToolCall -from .assistant_list_params import AssistantListParams as AssistantListParams -from .compound_filter_param import CompoundFilterParam as CompoundFilterParam -from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall -from .function_object_param import FunctionObjectParam as FunctionObjectParam -from .model_delete_response import ModelDeleteResponse as ModelDeleteResponse -from .transcription_segment import TranscriptionSegment as TranscriptionSegment -from .response_create_params import ResponseCreateParams as ResponseCreateParams -from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse -from .upload_add_part_params import UploadAddPartParams as UploadAddPartParams -from .upload_complete_params import UploadCompleteParams as UploadCompleteParams -from .voice_ids_shared_param import VoiceIDsSharedParam as VoiceIDsSharedParam -from .assistant_create_params import AssistantCreateParams as AssistantCreateParams -from .assistant_list_response import AssistantListResponse as AssistantListResponse -from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams -from .comparison_filter_param import ComparisonFilterParam as ComparisonFilterParam from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams -from .assistant_tools_function import AssistantToolsFunction as AssistantToolsFunction -from .completion_create_params import CompletionCreateParams as CompletionCreateParams -from .computer_tool_call_param import ComputerToolCallParam as ComputerToolCallParam -from .function_tool_call_param import FunctionToolCallParam as FunctionToolCallParam -from .image_create_edit_params import ImageCreateEditParams as ImageCreateEditParams -from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams -from .static_chunking_strategy import StaticChunkingStrategy as StaticChunkingStrategy from .stop_configuration_param import StopConfigurationParam as StopConfigurationParam -from .upload_add_part_response import UploadAddPartResponse as UploadAddPartResponse -from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams -from .assistant_delete_response import AssistantDeleteResponse as AssistantDeleteResponse -from .computer_tool_call_output import ComputerToolCallOutput as ComputerToolCallOutput from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse -from .function_tool_call_output import FunctionToolCallOutput as FunctionToolCallOutput -from .model_response_properties import ModelResponseProperties as ModelResponseProperties -from .assistant_supported_models import 
AssistantSupportedModels as AssistantSupportedModels -from .assistant_tools_code_param import AssistantToolsCodeParam as AssistantToolsCodeParam -from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse -from .moderation_classify_params import ModerationClassifyParams as ModerationClassifyParams -from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams -from .vector_store_list_response import VectorStoreListResponse as VectorStoreListResponse -from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams -from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams -from .web_search_tool_call_param import WebSearchToolCallParam as WebSearchToolCallParam -from .assistant_tools_file_search import AssistantToolsFileSearch as AssistantToolsFileSearch -from .create_thread_request_param import CreateThreadRequestParam as CreateThreadRequestParam -from .file_search_tool_call_param import FileSearchToolCallParam as FileSearchToolCallParam -from .audio_generate_speech_params import AudioGenerateSpeechParams as AudioGenerateSpeechParams -from .audio_translate_audio_params import AudioTranslateAudioParams as AudioTranslateAudioParams -from .moderation_classify_response import ModerationClassifyResponse as ModerationClassifyResponse -from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse -from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse -from .audio_transcribe_audio_params import AudioTranscribeAudioParams as AudioTranscribeAudioParams -from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams -from .organization_get_costs_params import OrganizationGetCostsParams as OrganizationGetCostsParams -from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter -from .assistant_tools_function_param import AssistantToolsFunctionParam as AssistantToolsFunctionParam -from .audio_translate_audio_response import AudioTranslateAudioResponse as AudioTranslateAudioResponse -from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse -from .image_create_generation_params import ImageCreateGenerationParams as ImageCreateGenerationParams -from .realtime_create_session_params import RealtimeCreateSessionParams as RealtimeCreateSessionParams -from .static_chunking_strategy_param import StaticChunkingStrategyParam as StaticChunkingStrategyParam -from .audio_transcribe_audio_response import AudioTranscribeAudioResponse as AudioTranscribeAudioResponse -from .computer_tool_call_output_param import ComputerToolCallOutputParam as ComputerToolCallOutputParam -from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck as ComputerToolCallSafetyCheck -from .function_tool_call_output_param import FunctionToolCallOutputParam as FunctionToolCallOutputParam -from .realtime_create_session_response import RealtimeCreateSessionResponse as RealtimeCreateSessionResponse -from .response_list_input_items_params import ResponseListInputItemsParams as ResponseListInputItemsParams -from .assistant_tools_file_search_param import AssistantToolsFileSearchParam as AssistantToolsFileSearchParam -from .response_list_input_items_response import ResponseListInputItemsResponse as ResponseListInputItemsResponse -from .organization_list_audit_logs_params import OrganizationListAuditLogsParams as 
OrganizationListAuditLogsParams -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam -from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam as AutoChunkingStrategyRequestParam from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam -from .assistants_api_response_format_option import ( - AssistantsAPIResponseFormatOption as AssistantsAPIResponseFormatOption, -) -from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam as ComputerToolCallSafetyCheckParam -from .organization_list_audit_logs_response import ( - OrganizationListAuditLogsResponse as OrganizationListAuditLogsResponse, -) -from .static_chunking_strategy_request_param import ( - StaticChunkingStrategyRequestParam as StaticChunkingStrategyRequestParam, -) -from .assistants_api_response_format_option_param import ( - AssistantsAPIResponseFormatOptionParam as AssistantsAPIResponseFormatOptionParam, -) -from .realtime_create_transcription_session_params import ( - RealtimeCreateTranscriptionSessionParams as RealtimeCreateTranscriptionSessionParams, -) -from .realtime_create_transcription_session_response import ( - RealtimeCreateTranscriptionSessionResponse as RealtimeCreateTranscriptionSessionResponse, -) diff --git a/src/digitalocean_genai_sdk/types/assistant_create_params.py b/src/digitalocean_genai_sdk/types/assistant_create_params.py deleted file mode 100644 index b89e4742..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_create_params.py +++ /dev/null @@ -1,211 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .reasoning_effort import ReasoningEffort -from .assistant_supported_models import AssistantSupportedModels -from .assistant_tools_code_param import AssistantToolsCodeParam -from .assistant_tools_function_param import AssistantToolsFunctionParam -from .assistant_tools_file_search_param import AssistantToolsFileSearchParam -from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam - -__all__ = [ - "AssistantCreateParams", - "ToolResources", - "ToolResourcesCodeInterpreter", - "ToolResourcesFileSearch", - "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", - "Tool", -] - - -class AssistantCreateParams(TypedDict, total=False): - model: Required[Union[str, AssistantSupportedModels]] - """ID of the model to use. - - You can use the [List models](/docs/api-reference/models/list) API to see all of - your available models, or see our [Model overview](/docs/models) for - descriptions of them. - """ - - description: Optional[str] - """The description of the assistant. The maximum length is 512 characters.""" - - instructions: Optional[str] - """The system instructions that the assistant uses. - - The maximum length is 256,000 characters. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. 
- - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - name: Optional[str] - """The name of the assistant. The maximum length is 256 characters.""" - - reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - """ - - response_format: Optional[AssistantsAPIResponseFormatOptionParam] - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - """ - - tool_resources: Optional[ToolResources] - """A set of resources that are used by the assistant's tools. - - The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - tools: Iterable[Tool] - """A list of tool enabled on the assistant. - - There can be a maximum of 128 tools per assistant. Tools can be of types - `code_interpreter`, `file_search`, or `function`. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - """ - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. 
- """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, - ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, -] - - -class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. - """ - - file_ids: List[str] - """A list of [file](/docs/api-reference/files) IDs to add to the vector store. - - There can be a maximum of 10000 files in a vector store. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - The [vector store](/docs/api-reference/vector-stores/object) attached to this - assistant. There can be a maximum of 1 vector store attached to the assistant. - """ - - vector_stores: Iterable[ToolResourcesFileSearchVectorStore] - """ - A helper to create a [vector store](/docs/api-reference/vector-stores/object) - with file_ids and attach it to this assistant. There can be a maximum of 1 - vector store attached to the assistant. - """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch - - -Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/assistant_delete_response.py b/src/digitalocean_genai_sdk/types/assistant_delete_response.py deleted file mode 100644 index 04207049..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["AssistantDeleteResponse"] - - -class AssistantDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["assistant.deleted"] diff --git a/src/digitalocean_genai_sdk/types/assistant_list_params.py b/src/digitalocean_genai_sdk/types/assistant_list_params.py deleted file mode 100644 index 834ffbca..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_list_params.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["AssistantListParams"] - - -class AssistantListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/assistant_list_response.py b/src/digitalocean_genai_sdk/types/assistant_list_response.py deleted file mode 100644 index dfc90bfa..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from .._models import BaseModel -from .assistant_object import AssistantObject - -__all__ = ["AssistantListResponse"] - - -class AssistantListResponse(BaseModel): - data: List[AssistantObject] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/assistant_object.py b/src/digitalocean_genai_sdk/types/assistant_object.py deleted file mode 100644 index 4aa71ab9..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_object.py +++ /dev/null @@ -1,133 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .assistant_tools_code import AssistantToolsCode -from .assistant_tools_function import AssistantToolsFunction -from .assistant_tools_file_search import AssistantToolsFileSearch -from .assistants_api_response_format_option import AssistantsAPIResponseFormatOption - -__all__ = ["AssistantObject", "Tool", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] - -Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction] - - -class ToolResourcesCodeInterpreter(BaseModel): - file_ids: Optional[List[str]] = None - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter`` tool. 
There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearch(BaseModel): - vector_store_ids: Optional[List[str]] = None - """ - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached - to this assistant. There can be a maximum of 1 vector store attached to the - assistant. - """ - - -class ToolResources(BaseModel): - code_interpreter: Optional[ToolResourcesCodeInterpreter] = None - - file_search: Optional[ToolResourcesFileSearch] = None - - -class AssistantObject(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the assistant was created.""" - - description: Optional[str] = None - """The description of the assistant. The maximum length is 512 characters.""" - - instructions: Optional[str] = None - """The system instructions that the assistant uses. - - The maximum length is 256,000 characters. - """ - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - model: str - """ID of the model to use. - - You can use the [List models](/docs/api-reference/models/list) API to see all of - your available models, or see our [Model overview](/docs/models) for - descriptions of them. - """ - - name: Optional[str] = None - """The name of the assistant. The maximum length is 256 characters.""" - - object: Literal["assistant"] - """The object type, which is always `assistant`.""" - - tools: List[Tool] - """A list of tool enabled on the assistant. - - There can be a maximum of 128 tools per assistant. Tools can be of types - `code_interpreter`, `file_search`, or `function`. - """ - - response_format: Optional[AssistantsAPIResponseFormatOption] = None - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - """ - - temperature: Optional[float] = None - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - """ - - tool_resources: Optional[ToolResources] = None - """A set of resources that are used by the assistant's tools. 
- - The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - top_p: Optional[float] = None - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - """ diff --git a/src/digitalocean_genai_sdk/types/assistant_supported_models.py b/src/digitalocean_genai_sdk/types/assistant_supported_models.py deleted file mode 100644 index 999b7f23..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_supported_models.py +++ /dev/null @@ -1,38 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal, TypeAlias - -__all__ = ["AssistantSupportedModels"] - -AssistantSupportedModels: TypeAlias = Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4.5-preview", - "gpt-4.5-preview-2025-02-27", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", -] diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code.py b/src/digitalocean_genai_sdk/types/assistant_tools_code.py deleted file mode 100644 index 73a40a71..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_code.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["AssistantToolsCode"] - - -class AssistantToolsCode(BaseModel): - type: Literal["code_interpreter"] - """The type of tool being defined: `code_interpreter`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py deleted file mode 100644 index 01420dda..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_code_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["AssistantToolsCodeParam"] - - -class AssistantToolsCodeParam(TypedDict, total=False): - type: Required[Literal["code_interpreter"]] - """The type of tool being defined: `code_interpreter`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py deleted file mode 100644 index 3c834718..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search.py +++ /dev/null @@ -1,56 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .file_search_ranker import FileSearchRanker - -__all__ = ["AssistantToolsFileSearch", "FileSearch", "FileSearchRankingOptions"] - - -class FileSearchRankingOptions(BaseModel): - score_threshold: float - """The score threshold for the file search. - - All values must be a floating point number between 0 and 1. - """ - - ranker: Optional[FileSearchRanker] = None - """The ranker to use for the file search. - - If not specified will use the `auto` ranker. - """ - - -class FileSearch(BaseModel): - max_num_results: Optional[int] = None - """The maximum number of results the file search tool should output. - - The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number - should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ - - ranking_options: Optional[FileSearchRankingOptions] = None - """The ranking options for the file search. - - If not specified, the file search tool will use the `auto` ranker and a - score_threshold of 0. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ - - -class AssistantToolsFileSearch(BaseModel): - type: Literal["file_search"] - """The type of tool being defined: `file_search`""" - - file_search: Optional[FileSearch] = None - """Overrides for the file search tool.""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py deleted file mode 100644 index 3f0e5af4..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_file_search_param.py +++ /dev/null @@ -1,56 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from .file_search_ranker import FileSearchRanker - -__all__ = ["AssistantToolsFileSearchParam", "FileSearch", "FileSearchRankingOptions"] - - -class FileSearchRankingOptions(TypedDict, total=False): - score_threshold: Required[float] - """The score threshold for the file search. - - All values must be a floating point number between 0 and 1. - """ - - ranker: FileSearchRanker - """The ranker to use for the file search. - - If not specified will use the `auto` ranker. - """ - - -class FileSearch(TypedDict, total=False): - max_num_results: int - """The maximum number of results the file search tool should output. - - The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number - should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than `max_num_results` results. - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ - - ranking_options: FileSearchRankingOptions - """The ranking options for the file search. - - If not specified, the file search tool will use the `auto` ranker and a - score_threshold of 0. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. 
- """ - - -class AssistantToolsFileSearchParam(TypedDict, total=False): - type: Required[Literal["file_search"]] - """The type of tool being defined: `file_search`""" - - file_search: FileSearch - """Overrides for the file search tool.""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function.py b/src/digitalocean_genai_sdk/types/assistant_tools_function.py deleted file mode 100644 index 89326d54..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_function.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel -from .function_object import FunctionObject - -__all__ = ["AssistantToolsFunction"] - - -class AssistantToolsFunction(BaseModel): - function: FunctionObject - - type: Literal["function"] - """The type of tool being defined: `function`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py b/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py deleted file mode 100644 index 4e9ecf3d..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_tools_function_param.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from .function_object_param import FunctionObjectParam - -__all__ = ["AssistantToolsFunctionParam"] - - -class AssistantToolsFunctionParam(TypedDict, total=False): - function: Required[FunctionObjectParam] - - type: Required[Literal["function"]] - """The type of tool being defined: `function`""" diff --git a/src/digitalocean_genai_sdk/types/assistant_update_params.py b/src/digitalocean_genai_sdk/types/assistant_update_params.py deleted file mode 100644 index cf301dd4..00000000 --- a/src/digitalocean_genai_sdk/types/assistant_update_params.py +++ /dev/null @@ -1,137 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import TypeAlias, TypedDict - -from .reasoning_effort import ReasoningEffort -from .assistant_supported_models import AssistantSupportedModels -from .assistant_tools_code_param import AssistantToolsCodeParam -from .assistant_tools_function_param import AssistantToolsFunctionParam -from .assistant_tools_file_search_param import AssistantToolsFileSearchParam -from .assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam - -__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"] - - -class AssistantUpdateParams(TypedDict, total=False): - description: Optional[str] - """The description of the assistant. The maximum length is 512 characters.""" - - instructions: Optional[str] - """The system instructions that the assistant uses. - - The maximum length is 256,000 characters. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - model: Union[str, AssistantSupportedModels] - """ID of the model to use. 
- - You can use the [List models](/docs/api-reference/models/list) API to see all of - your available models, or see our [Model overview](/docs/models) for - descriptions of them. - """ - - name: Optional[str] - """The name of the assistant. The maximum length is 256 characters.""" - - reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - """ - - response_format: Optional[AssistantsAPIResponseFormatOptionParam] - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - """ - - tool_resources: Optional[ToolResources] - """A set of resources that are used by the assistant's tools. - - The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - tools: Iterable[Tool] - """A list of tool enabled on the assistant. - - There can be a maximum of 128 tools per assistant. Tools can be of types - `code_interpreter`, `file_search`, or `function`. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - """ - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - Overrides the list of [file](/docs/api-reference/files) IDs made available to - the `code_interpreter` tool. There can be a maximum of 20 files associated with - the tool. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - Overrides the [vector store](/docs/api-reference/vector-stores/object) attached - to this assistant. There can be a maximum of 1 vector store attached to the - assistant. 
- """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch - - -Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py deleted file mode 100644 index 07c4f71e..00000000 --- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union -from typing_extensions import Literal, TypeAlias - -from .chat.response_format_text import ResponseFormatText -from .chat.response_format_json_object import ResponseFormatJsonObject -from .chat.response_format_json_schema import ResponseFormatJsonSchema - -__all__ = ["AssistantsAPIResponseFormatOption"] - -AssistantsAPIResponseFormatOption: TypeAlias = Union[ - Literal["auto"], ResponseFormatText, ResponseFormatJsonObject, ResponseFormatJsonSchema -] diff --git a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py b/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py deleted file mode 100644 index 7dbf967f..00000000 --- a/src/digitalocean_genai_sdk/types/assistants_api_response_format_option_param.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, TypeAlias - -from .chat.response_format_text_param import ResponseFormatTextParam -from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam -from .chat.response_format_json_schema_param import ResponseFormatJsonSchemaParam - -__all__ = ["AssistantsAPIResponseFormatOptionParam"] - -AssistantsAPIResponseFormatOptionParam: TypeAlias = Union[ - Literal["auto"], ResponseFormatTextParam, ResponseFormatJsonObjectParam, ResponseFormatJsonSchemaParam -] diff --git a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py b/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py deleted file mode 100644 index 8857594a..00000000 --- a/src/digitalocean_genai_sdk/types/audio_generate_speech_params.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypedDict - -from .voice_ids_shared_param import VoiceIDsSharedParam - -__all__ = ["AudioGenerateSpeechParams"] - - -class AudioGenerateSpeechParams(TypedDict, total=False): - input: Required[str] - """The text to generate audio for. The maximum length is 4096 characters.""" - - model: Required[Union[str, Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"]]] - """ - One of the available [TTS models](/docs/models#tts): `tts-1`, `tts-1-hd` or - `gpt-4o-mini-tts`. - """ - - voice: Required[VoiceIDsSharedParam] - """The voice to use when generating the audio. - - Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, - `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in - the [Text to speech guide](/docs/guides/text-to-speech#voice-options). 
- """ - - instructions: str - """Control the voice of your generated audio with additional instructions. - - Does not work with `tts-1` or `tts-1-hd`. - """ - - response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] - """The format to audio in. - - Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. - """ - - speed: float - """The speed of the generated audio. - - Select a value from `0.25` to `4.0`. `1.0` is the default. - """ diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py deleted file mode 100644 index cbc15157..00000000 --- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_params.py +++ /dev/null @@ -1,87 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypedDict - -from .._types import FileTypes - -__all__ = ["AudioTranscribeAudioParams"] - - -class AudioTranscribeAudioParams(TypedDict, total=False): - file: Required[FileTypes] - """ - The audio file object (not file name) to transcribe, in one of these formats: - flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - """ - - model: Required[Union[str, Literal["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"]]] - """ID of the model to use. - - The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1` - (which is powered by our open source Whisper V2 model). - """ - - include: List[Literal["logprobs"]] - """Additional information to include in the transcription response. - - `logprobs` will return the log probabilities of the tokens in the response to - understand the model's confidence in the transcription. `logprobs` only works - with response_format set to `json` and only with the models `gpt-4o-transcribe` - and `gpt-4o-mini-transcribe`. - """ - - language: str - """The language of the input audio. - - Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - """ - - prompt: str - """An optional text to guide the model's style or continue a previous audio - segment. - - The [prompt](/docs/guides/speech-to-text#prompting) should match the audio - language. - """ - - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] - """ - The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, - the only supported format is `json`. - """ - - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the - [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) - for more information. - - Note: Streaming is not supported for the `whisper-1` model and will be ignored. - """ - - temperature: float - """The sampling temperature, between 0 and 1. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. 
If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - """ - - timestamp_granularities: List[Literal["word", "segment"]] - """The timestamp granularities to populate for this transcription. - - `response_format` must be set `verbose_json` to use timestamp granularities. - Either or both of these options are supported: `word`, or `segment`. Note: There - is no additional latency for segment timestamps, but generating word timestamps - incurs additional latency. - """ diff --git a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py b/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py deleted file mode 100644 index 54b999ed..00000000 --- a/src/digitalocean_genai_sdk/types/audio_transcribe_audio_response.py +++ /dev/null @@ -1,69 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .transcription_segment import TranscriptionSegment - -__all__ = [ - "AudioTranscribeAudioResponse", - "CreateTranscriptionResponseJson", - "CreateTranscriptionResponseJsonLogprob", - "CreateTranscriptionResponseVerboseJson", - "CreateTranscriptionResponseVerboseJsonWord", -] - - -class CreateTranscriptionResponseJsonLogprob(BaseModel): - token: str - """The token that was used to generate the log probability.""" - - bytes: List[int] - """The bytes that were used to generate the log probability.""" - - logprob: float - """The log probability of the token.""" - - -class CreateTranscriptionResponseJson(BaseModel): - text: str - """The transcribed text.""" - - logprobs: Optional[List[CreateTranscriptionResponseJsonLogprob]] = None - """The log probabilities of the tokens in the transcription. - - Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` - if `logprobs` is added to the `include` array. - """ - - -class CreateTranscriptionResponseVerboseJsonWord(BaseModel): - end: float - """End time of the word in seconds.""" - - start: float - """Start time of the word in seconds.""" - - word: str - """The text content of the word.""" - - -class CreateTranscriptionResponseVerboseJson(BaseModel): - duration: float - """The duration of the input audio.""" - - language: str - """The language of the input audio.""" - - text: str - """The transcribed text.""" - - segments: Optional[List[TranscriptionSegment]] = None - """Segments of the transcribed text and their corresponding details.""" - - words: Optional[List[CreateTranscriptionResponseVerboseJsonWord]] = None - """Extracted words and their corresponding timestamps.""" - - -AudioTranscribeAudioResponse: TypeAlias = Union[CreateTranscriptionResponseJson, CreateTranscriptionResponseVerboseJson] diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py deleted file mode 100644 index cc222f14..00000000 --- a/src/digitalocean_genai_sdk/types/audio_translate_audio_params.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypedDict - -from .._types import FileTypes - -__all__ = ["AudioTranslateAudioParams"] - - -class AudioTranslateAudioParams(TypedDict, total=False): - file: Required[FileTypes] - """ - The audio file object (not file name) translate, in one of these formats: flac, - mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - """ - - model: Required[Union[str, Literal["whisper-1"]]] - """ID of the model to use. - - Only `whisper-1` (which is powered by our open source Whisper V2 model) is - currently available. - """ - - prompt: str - """An optional text to guide the model's style or continue a previous audio - segment. - - The [prompt](/docs/guides/speech-to-text#prompting) should be in English. - """ - - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] - """ - The format of the output, in one of these options: `json`, `text`, `srt`, - `verbose_json`, or `vtt`. - """ - - temperature: float - """The sampling temperature, between 0 and 1. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. If set to 0, the model will use - [log probability](https://en.wikipedia.org/wiki/Log_probability) to - automatically increase the temperature until certain thresholds are hit. - """ diff --git a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py b/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py deleted file mode 100644 index 74d08a73..00000000 --- a/src/digitalocean_genai_sdk/types/audio_translate_audio_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .transcription_segment import TranscriptionSegment - -__all__ = ["AudioTranslateAudioResponse", "CreateTranslationResponseJson", "CreateTranslationResponseVerboseJson"] - - -class CreateTranslationResponseJson(BaseModel): - text: str - - -class CreateTranslationResponseVerboseJson(BaseModel): - duration: float - """The duration of the input audio.""" - - language: str - """The language of the output translation (always `english`).""" - - text: str - """The translated text.""" - - segments: Optional[List[TranscriptionSegment]] = None - """Segments of the translated text and their corresponding details.""" - - -AudioTranslateAudioResponse: TypeAlias = Union[CreateTranslationResponseJson, CreateTranslationResponseVerboseJson] diff --git a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py b/src/digitalocean_genai_sdk/types/audit_log_actor_user.py deleted file mode 100644 index f3da325d..00000000 --- a/src/digitalocean_genai_sdk/types/audit_log_actor_user.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AuditLogActorUser"] - - -class AuditLogActorUser(BaseModel): - id: Optional[str] = None - """The user id.""" - - email: Optional[str] = None - """The user email.""" diff --git a/src/digitalocean_genai_sdk/types/audit_log_event_type.py b/src/digitalocean_genai_sdk/types/audit_log_event_type.py deleted file mode 100644 index 2031cbb8..00000000 --- a/src/digitalocean_genai_sdk/types/audit_log_event_type.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal, TypeAlias - -__all__ = ["AuditLogEventType"] - -AuditLogEventType: TypeAlias = Literal[ - "api_key.created", - "api_key.updated", - "api_key.deleted", - "invite.sent", - "invite.accepted", - "invite.deleted", - "login.succeeded", - "login.failed", - "logout.succeeded", - "logout.failed", - "organization.updated", - "project.created", - "project.updated", - "project.archived", - "service_account.created", - "service_account.updated", - "service_account.deleted", - "rate_limit.updated", - "rate_limit.deleted", - "user.added", - "user.updated", - "user.deleted", -] diff --git a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py deleted file mode 100644 index 5c0c131e..00000000 --- a/src/digitalocean_genai_sdk/types/auto_chunking_strategy_request_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["AutoChunkingStrategyRequestParam"] - - -class AutoChunkingStrategyRequestParam(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" diff --git a/src/digitalocean_genai_sdk/types/batch.py b/src/digitalocean_genai_sdk/types/batch.py deleted file mode 100644 index 1fdd6928..00000000 --- a/src/digitalocean_genai_sdk/types/batch.py +++ /dev/null @@ -1,109 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["Batch", "Errors", "ErrorsData", "RequestCounts"] - - -class ErrorsData(BaseModel): - code: Optional[str] = None - """An error code identifying the error type.""" - - line: Optional[int] = None - """The line number of the input file where the error occurred, if applicable.""" - - message: Optional[str] = None - """A human-readable message providing more details about the error.""" - - param: Optional[str] = None - """The name of the parameter that caused the error, if applicable.""" - - -class Errors(BaseModel): - data: Optional[List[ErrorsData]] = None - - object: Optional[str] = None - """The object type, which is always `list`.""" - - -class RequestCounts(BaseModel): - completed: int - """Number of requests that have been completed successfully.""" - - failed: int - """Number of requests that have failed.""" - - total: int - """Total number of requests in the batch.""" - - -class Batch(BaseModel): - id: str - - completion_window: str - """The time frame within which the batch should be processed.""" - - created_at: int - """The Unix timestamp (in seconds) for when the batch was created.""" - - endpoint: str - """The OpenAI API endpoint used by the batch.""" - - input_file_id: str - """The ID of the input file for the batch.""" - - object: Literal["batch"] - """The object type, which is always `batch`.""" - - status: Literal[ - "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" - ] - """The current status of the batch.""" - - cancelled_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch was cancelled.""" - - cancelling_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch started cancelling.""" - - completed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch was completed.""" - - error_file_id: Optional[str] = None - """The ID of the file containing the outputs of requests with errors.""" - - errors: Optional[Errors] = None - - expired_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch expired.""" - - expires_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch will expire.""" - - failed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch failed.""" - - finalizing_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch started finalizing.""" - - in_progress_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the batch started processing.""" - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
- """ - - output_file_id: Optional[str] = None - """The ID of the file containing the outputs of successfully executed requests.""" - - request_counts: Optional[RequestCounts] = None - """The request counts for different statuses within the batch.""" diff --git a/src/digitalocean_genai_sdk/types/batch_create_params.py b/src/digitalocean_genai_sdk/types/batch_create_params.py deleted file mode 100644 index 08243244..00000000 --- a/src/digitalocean_genai_sdk/types/batch_create_params.py +++ /dev/null @@ -1,46 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["BatchCreateParams"] - - -class BatchCreateParams(TypedDict, total=False): - completion_window: Required[Literal["24h"]] - """The time frame within which the batch should be processed. - - Currently only `24h` is supported. - """ - - endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] - """The endpoint to be used for all requests in the batch. - - Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and - `/v1/completions` are supported. Note that `/v1/embeddings` batches are also - restricted to a maximum of 50,000 embedding inputs across all requests in the - batch. - """ - - input_file_id: Required[str] - """The ID of an uploaded file that contains requests for the new batch. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your input file must be formatted as a - [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with - the purpose `batch`. The file can contain up to 50,000 requests, and can be up - to 200 MB in size. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ diff --git a/src/digitalocean_genai_sdk/types/batch_list_params.py b/src/digitalocean_genai_sdk/types/batch_list_params.py deleted file mode 100644 index ef5e966b..00000000 --- a/src/digitalocean_genai_sdk/types/batch_list_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["BatchListParams"] - - -class BatchListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ diff --git a/src/digitalocean_genai_sdk/types/batch_list_response.py b/src/digitalocean_genai_sdk/types/batch_list_response.py deleted file mode 100644 index 87c4f9b8..00000000 --- a/src/digitalocean_genai_sdk/types/batch_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from typing_extensions import Literal - -from .batch import Batch -from .._models import BaseModel - -__all__ = ["BatchListResponse"] - - -class BatchListResponse(BaseModel): - data: List[Batch] - - has_more: bool - - object: Literal["list"] - - first_id: Optional[str] = None - - last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/chat/__init__.py b/src/digitalocean_genai_sdk/types/chat/__init__.py index cfa8c56a..7dbba0c2 100644 --- a/src/digitalocean_genai_sdk/types/chat/__init__.py +++ b/src/digitalocean_genai_sdk/types/chat/__init__.py @@ -6,25 +6,7 @@ from .token_logprob import TokenLogprob as TokenLogprob from .create_response import CreateResponse as CreateResponse from .response_message import ResponseMessage as ResponseMessage -from .message_tool_call import MessageToolCall as MessageToolCall -from .web_search_location import WebSearchLocation as WebSearchLocation -from .response_format_text import ResponseFormatText as ResponseFormatText -from .completion_list_params import CompletionListParams as CompletionListParams -from .model_ids_shared_param import ModelIDsSharedParam as ModelIDsSharedParam -from .message_tool_call_param import MessageToolCallParam as MessageToolCallParam -from .web_search_context_size import WebSearchContextSize as WebSearchContextSize from .completion_create_params import CompletionCreateParams as CompletionCreateParams -from .completion_list_response import CompletionListResponse as CompletionListResponse -from .completion_update_params import CompletionUpdateParams as CompletionUpdateParams -from .web_search_location_param import WebSearchLocationParam as WebSearchLocationParam -from .completion_delete_response import CompletionDeleteResponse as CompletionDeleteResponse -from .response_format_text_param import ResponseFormatTextParam as ResponseFormatTextParam -from .response_format_json_object import ResponseFormatJsonObject as ResponseFormatJsonObject -from .response_format_json_schema import ResponseFormatJsonSchema as ResponseFormatJsonSchema -from .completion_list_messages_params import CompletionListMessagesParams as CompletionListMessagesParams -from .completion_list_messages_response import CompletionListMessagesResponse as CompletionListMessagesResponse -from .response_format_json_object_param import ResponseFormatJsonObjectParam as ResponseFormatJsonObjectParam -from .response_format_json_schema_param import ResponseFormatJsonSchemaParam as ResponseFormatJsonSchemaParam from .request_message_content_part_text_param import ( RequestMessageContentPartTextParam as RequestMessageContentPartTextParam, ) diff --git a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py index d11f9322..fcbf22bb 100644 --- a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py +++ b/src/digitalocean_genai_sdk/types/chat/completion_create_params.py @@ -2,81 +2,31 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..reasoning_effort import ReasoningEffort -from ..function_object_param import FunctionObjectParam -from .model_ids_shared_param import ModelIDsSharedParam -from ..voice_ids_shared_param import VoiceIDsSharedParam -from .message_tool_call_param import MessageToolCallParam -from .web_search_context_size import WebSearchContextSize from 
..stop_configuration_param import StopConfigurationParam -from .web_search_location_param import WebSearchLocationParam -from .response_format_text_param import ResponseFormatTextParam -from .response_format_json_object_param import ResponseFormatJsonObjectParam -from .response_format_json_schema_param import ResponseFormatJsonSchemaParam from ..chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from .request_message_content_part_text_param import RequestMessageContentPartTextParam __all__ = [ "CompletionCreateParams", "Message", - "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile", - "MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile", "MessageChatCompletionRequestAssistantMessage", - "MessageChatCompletionRequestAssistantMessageAudio", "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal", - "MessageChatCompletionRequestAssistantMessageFunctionCall", - "MessageChatCompletionRequestToolMessage", - "MessageChatCompletionRequestFunctionMessage", - "Audio", - "FunctionCall", - "FunctionCallChatCompletionFunctionCallOption", - "Function", - "Prediction", - "ResponseFormat", - "ToolChoice", - "ToolChoiceChatCompletionNamedToolChoice", - "ToolChoiceChatCompletionNamedToolChoiceFunction", - "Tool", - "WebSearchOptions", - "WebSearchOptionsUserLocation", ] class CompletionCreateParams(TypedDict, total=False): messages: Required[Iterable[Message]] - """A list of messages comprising the conversation so far. - - Depending on the [model](/docs/models) you use, different message types - (modalities) are supported, like [text](/docs/guides/text-generation), - [images](/docs/guides/vision), and [audio](/docs/guides/audio). - """ - - model: Required[ModelIDsSharedParam] - """Model ID used to generate the response, like `gpt-4o` or `o1`. - - OpenAI offers a wide range of models with different capabilities, performance - characteristics, and price points. Refer to the [model guide](/docs/models) to - browse and compare available models. - """ + """A list of messages comprising the conversation so far.""" - audio: Optional[Audio] - """Parameters for audio output. - - Required when audio output is requested with `modalities: ["audio"]`. - [Learn more](/docs/guides/audio). - """ + model: Required[str] + """Model ID used to generate the response.""" frequency_penalty: Optional[float] """Number between -2.0 and 2.0. @@ -85,29 +35,6 @@ class CompletionCreateParams(TypedDict, total=False): text so far, decreasing the model's likelihood to repeat the same line verbatim. 
""" - function_call: FunctionCall - """Deprecated in favor of `tool_choice`. - - Controls which (if any) function is called by the model. - - `none` means the model will not call a function and instead generates a message. - - `auto` means the model can pick between generating a message or calling a - function. - - Specifying a particular function via `{"name": "my_function"}` forces the model - to call that function. - - `none` is the default when no functions are present. `auto` is the default if - functions are present. - """ - - functions: Iterable[Function] - """Deprecated in favor of `tools`. - - A list of functions the model may generate JSON inputs for. - """ - logit_bias: Optional[Dict[str, int]] """Modify the likelihood of specified tokens appearing in the completion. @@ -128,18 +55,16 @@ class CompletionCreateParams(TypedDict, total=False): max_completion_tokens: Optional[int] """ - An upper bound for the number of tokens that can be generated for a completion, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. """ max_tokens: Optional[int] - """ - The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. This value can be used to control - [costs](https://openai.com/api/pricing/) for text generated via API. + """The maximum number of tokens that can be generated in the completion. - This value is now deprecated in favor of `max_completion_tokens`, and is not - compatible with [o1 series models](/docs/guides/reasoning). + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. """ metadata: Optional[Dict[str, str]] @@ -152,20 +77,6 @@ class CompletionCreateParams(TypedDict, total=False): a maximum length of 512 characters. """ - modalities: Optional[List[Literal["text", "audio"]]] - """ - Output types that you would like the model to generate. Most models are capable - of generating text, which is the default: - - `["text"]` - - The `gpt-4o-audio-preview` model can also be used to - [generate audio](/docs/guides/audio). To request that this model generate both - text and audio responses, you can use: - - `["text", "audio"]` - """ - n: Optional[int] """How many chat completion choices to generate for each input message. @@ -173,19 +84,6 @@ class CompletionCreateParams(TypedDict, total=False): of the choices. Keep `n` as `1` to minimize costs. """ - parallel_tool_calls: bool - """ - Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - """ - - prediction: Optional[Prediction] - """ - Static predicted output content, such as the content of a text file that is - being regenerated. - """ - presence_penalty: Optional[float] """Number between -2.0 and 2.0. @@ -193,76 +91,16 @@ class CompletionCreateParams(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. 
- """ - - response_format: ResponseFormat - """An object specifying the format that the model must output. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - seed: Optional[int] - """ - This feature is in Beta. If specified, our system will make a best effort to - sample deterministically, such that repeated requests with the same `seed` and - parameters should return the same result. Determinism is not guaranteed, and you - should refer to the `system_fingerprint` response parameter to monitor changes - in the backend. - """ - - service_tier: Optional[Literal["auto", "default"]] - """Specifies the latency tier to use for processing the request. - - This parameter is relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarentee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. - - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. - """ - stop: Optional[StopConfigurationParam] """Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. """ - store: Optional[bool] - """ - Whether or not to store the output of this chat completion request for use in - our [model distillation](/docs/guides/distillation) or - [evals](/docs/guides/evals) products. - """ - stream: Optional[bool] """ If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the [Streaming section below](/docs/api-reference/chat/streaming) for more - information, along with the - [streaming responses](/docs/guides/streaming-responses) guide for more - information on how to handle the streaming events. + generated using server-sent events. """ stream_options: Optional[ChatCompletionStreamOptionsParam] @@ -276,27 +114,6 @@ class CompletionCreateParams(TypedDict, total=False): this or `top_p` but not both. """ - tool_choice: ToolChoice - """ - Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - """ - - tools: Iterable[Tool] - """A list of tools the model may call. - - Currently, only functions are supported as a tool. 
Use this to provide a list of - functions the model may generate JSON inputs for. A max of 128 functions are - supported. - """ - top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -315,29 +132,8 @@ class CompletionCreateParams(TypedDict, total=False): user: str """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ - - web_search_options: WebSearchOptions - """ - This tool searches the web for relevant results to use in a response. Learn more - about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). - """ - - -class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] - """The contents of the developer message.""" - - role: Required[Literal["developer"]] - """The role of the messages author, in this case `developer`.""" - - name: str - """An optional name for the participant. - - Provides the model information to differentiate between participants of the same - role. + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. """ @@ -348,114 +144,22 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" - name: str - """An optional name for the participant. - - Provides the model information to differentiate between participants of the same - role. - """ - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL( - TypedDict, total=False -): - url: Required[str] - """Either a URL of the image or the base64 encoded image data.""" - - detail: Literal["auto", "low", "high"] - """Specifies the detail level of the image. - - Learn more in the - [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). - """ - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage( - TypedDict, total=False -): - image_url: Required[ - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImageImageURL - ] - - type: Required[Literal["image_url"]] - """The type of the content part.""" - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio( - TypedDict, total=False -): - data: Required[str] - """Base64 encoded audio data.""" - - format: Required[Literal["wav", "mp3"]] - """The format of the encoded audio data. Currently supports "wav" and "mp3".""" - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio( - TypedDict, total=False -): - input_audio: Required[ - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudioInputAudio - ] - - type: Required[Literal["input_audio"]] - """The type of the content part. Always `input_audio`.""" - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile( - TypedDict, total=False -): - file_data: str - """ - The base64 encoded file data, used when passing the file to the model as a - string. 
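For orientation, a minimal sketch of a request body matching the trimmed `CompletionCreateParams` above (exported from `digitalocean_genai_sdk.types.chat` per the `__init__.py` hunk earlier in this patch); the model slug and literal values are illustrative assumptions, not taken from this diff:

```python
# Illustrative sketch, not part of this diff: a request body matching the
# trimmed CompletionCreateParams. The model slug is an assumed placeholder.
from digitalocean_genai_sdk.types.chat import CompletionCreateParams

params: CompletionCreateParams = {
    "model": "llama3-8b-instruct",  # now a plain string rather than a model-ID enum
    "messages": [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Explain server-sent events in one sentence."},
    ],
    "max_completion_tokens": 256,
    "temperature": 0.7,
    "stream": False,  # True streams chunks back via server-sent events
}
```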
- """ - - file_id: str - """The ID of an uploaded file to use as input.""" - - filename: str - """The name of the file, used when passing the file to the model as a string.""" - - -class MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile( - TypedDict, total=False -): - file: Required[ - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFileFile - ] - - type: Required[Literal["file"]] - """The type of the content part. Always `file`.""" +class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): + content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] + """The contents of the developer message.""" -MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ - RequestMessageContentPartTextParam, - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartImage, - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartAudio, - MessageChatCompletionRequestUserMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartFile, -] + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] + content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" - name: str - """An optional name for the participant. - - Provides the model information to differentiate between participants of the same - role. - """ - - -class MessageChatCompletionRequestAssistantMessageAudio(TypedDict, total=False): - id: Required[str] - """Unique identifier for a previous audio response from the model.""" - class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal( TypedDict, total=False @@ -473,190 +177,20 @@ class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatC ] -class MessageChatCompletionRequestAssistantMessageFunctionCall(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" - - class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - audio: Optional[MessageChatCompletionRequestAssistantMessageAudio] - """Data about a previous audio response from the model. - - [Learn more](/docs/guides/audio). - """ - content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] - """The contents of the assistant message. - - Required unless `tool_calls` or `function_call` is specified. - """ - - function_call: Optional[MessageChatCompletionRequestAssistantMessageFunctionCall] - """Deprecated and replaced by `tool_calls`. 
- - The name and arguments of a function that should be called, as generated by the - model. - """ - - name: str - """An optional name for the participant. - - Provides the model information to differentiate between participants of the same - role. - """ + """The contents of the assistant message.""" refusal: Optional[str] """The refusal message by the assistant.""" - tool_calls: Iterable[MessageToolCallParam] - """The tool calls generated by the model, such as function calls.""" - - -class MessageChatCompletionRequestToolMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] - """The contents of the tool message.""" - - role: Required[Literal["tool"]] - """The role of the messages author, in this case `tool`.""" - - tool_call_id: Required[str] - """Tool call that this message is responding to.""" - - -class MessageChatCompletionRequestFunctionMessage(TypedDict, total=False): - content: Required[Optional[str]] - """The contents of the function message.""" - - name: Required[str] - """The name of the function to call.""" - - role: Required[Literal["function"]] - """The role of the messages author, in this case `function`.""" - Message: TypeAlias = Union[ - MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestSystemMessage, + MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, - MessageChatCompletionRequestToolMessage, - MessageChatCompletionRequestFunctionMessage, ] - - -class Audio(TypedDict, total=False): - format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]] - """Specifies the output audio format. - - Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. - """ - - voice: Required[VoiceIDsSharedParam] - """The voice the model uses to respond. - - Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and - `shimmer`. - """ - - -class FunctionCallChatCompletionFunctionCallOption(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - -FunctionCall: TypeAlias = Union[Literal["none", "auto"], FunctionCallChatCompletionFunctionCallOption] - - -class Function(TypedDict, total=False): - name: Required[str] - """The name of the function to be called. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - description: str - """ - A description of what the function does, used by the model to choose when and - how to call the function. - """ - - parameters: Dict[str, object] - """The parameters the functions accepts, described as a JSON Schema object. - - See the [guide](/docs/guides/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. - - Omitting `parameters` defines a function with an empty parameter list. - """ - - -class Prediction(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] - """ - The content that should be matched when generating a model response. If - generated tokens would match this content, the entire model response can be - returned much more quickly. - """ - - type: Required[Literal["content"]] - """The type of the predicted content you want to provide. - - This type is currently always `content`. 
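After this change the `Message` union accepts exactly the four shapes defined above. A sketch with one message per surviving role; the contents are invented for illustration:

```python
# Illustrative sketch, not part of this diff: one message per role that the
# trimmed Message union still accepts (system, developer, user, assistant).
from typing import List

from digitalocean_genai_sdk.types.chat.completion_create_params import Message

messages: List[Message] = [
    {"role": "system", "content": "Answer tersely."},
    {"role": "developer", "content": "Avoid code unless asked."},
    {"role": "user", "content": "What port does HTTPS use by default?"},
    {"role": "assistant", "content": "Port 443.", "refusal": None},
]
```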
- """ - - -ResponseFormat: TypeAlias = Union[ResponseFormatTextParam, ResponseFormatJsonSchemaParam, ResponseFormatJsonObjectParam] - - -class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - -class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): - function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - -ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] - - -class Tool(TypedDict, total=False): - function: Required[FunctionObjectParam] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - -class WebSearchOptionsUserLocation(TypedDict, total=False): - approximate: Required[WebSearchLocationParam] - """Approximate location parameters for the search.""" - - type: Required[Literal["approximate"]] - """The type of location approximation. Always `approximate`.""" - - -class WebSearchOptions(TypedDict, total=False): - search_context_size: WebSearchContextSize - """ - High level guidance for the amount of context window space to use for the - search. One of `low`, `medium`, or `high`. `medium` is the default. - """ - - user_location: Optional[WebSearchOptionsUserLocation] - """Approximate location parameters for the search.""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py b/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py deleted file mode 100644 index 9e456e16..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_delete_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["CompletionDeleteResponse"] - - -class CompletionDeleteResponse(BaseModel): - id: str - """The ID of the chat completion that was deleted.""" - - deleted: bool - """Whether the chat completion was deleted.""" - - object: Literal["chat.completion.deleted"] - """The type of object being deleted.""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py deleted file mode 100644 index 43f4a7cc..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["CompletionListMessagesParams"] - - -class CompletionListMessagesParams(TypedDict, total=False): - after: str - """Identifier for the last message from the previous pagination request.""" - - limit: int - """Number of messages to retrieve.""" - - order: Literal["asc", "desc"] - """Sort order for messages by timestamp. - - Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. 
- """ diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py deleted file mode 100644 index 57087a63..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_list_messages_response.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from ..._models import BaseModel -from .response_message import ResponseMessage - -__all__ = ["CompletionListMessagesResponse", "Data"] - - -class Data(ResponseMessage): - id: str - """The identifier of the chat message.""" - - -class CompletionListMessagesResponse(BaseModel): - data: List[Data] - """An array of chat completion message objects.""" - - first_id: str - """The identifier of the first chat message in the data array.""" - - has_more: bool - """Indicates whether there are more chat messages available.""" - - last_id: str - """The identifier of the last chat message in the data array.""" - - object: Literal["list"] - """The type of this object. It is always set to "list".""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py b/src/digitalocean_genai_sdk/types/chat/completion_list_params.py deleted file mode 100644 index 8f149e35..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_list_params.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Literal, TypedDict - -__all__ = ["CompletionListParams"] - - -class CompletionListParams(TypedDict, total=False): - after: str - """Identifier for the last chat completion from the previous pagination request.""" - - limit: int - """Number of Chat Completions to retrieve.""" - - metadata: Optional[Dict[str, str]] - """A list of metadata keys to filter the Chat Completions by. Example: - - `metadata[key1]=value1&metadata[key2]=value2` - """ - - model: str - """The model used to generate the Chat Completions.""" - - order: Literal["asc", "desc"] - """Sort order for Chat Completions by timestamp. - - Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. - """ diff --git a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py b/src/digitalocean_genai_sdk/types/chat/completion_list_response.py deleted file mode 100644 index 2899f598..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_list_response.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from ..._models import BaseModel -from .create_response import CreateResponse - -__all__ = ["CompletionListResponse"] - - -class CompletionListResponse(BaseModel): - data: List[CreateResponse] - """An array of chat completion objects.""" - - first_id: str - """The identifier of the first chat completion in the data array.""" - - has_more: bool - """Indicates whether there are more Chat Completions available.""" - - last_id: str - """The identifier of the last chat completion in the data array.""" - - object: Literal["list"] - """The type of this object. 
It is always set to "list".""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py b/src/digitalocean_genai_sdk/types/chat/completion_update_params.py deleted file mode 100644 index 1f09ecaa..00000000 --- a/src/digitalocean_genai_sdk/types/chat/completion_update_params.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["CompletionUpdateParams"] - - -class CompletionUpdateParams(TypedDict, total=False): - metadata: Required[Optional[Dict[str, str]]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ diff --git a/src/digitalocean_genai_sdk/types/chat/create_response.py b/src/digitalocean_genai_sdk/types/chat/create_response.py index a6320518..c80c56ac 100644 --- a/src/digitalocean_genai_sdk/types/chat/create_response.py +++ b/src/digitalocean_genai_sdk/types/chat/create_response.py @@ -20,14 +20,12 @@ class ChoiceLogprobs(BaseModel): class Choice(BaseModel): - finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"] + finish_reason: Literal["stop", "length"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop - sequence, `length` if the maximum number of tokens specified in the request was - reached, `content_filter` if content was omitted due to a flag from our content - filters, `tool_calls` if the model called a tool, or `function_call` - (deprecated) if the model called a function. + sequence, or `length` if the maximum number of tokens specified in the request + was reached. """ index: int @@ -59,15 +57,5 @@ class CreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request.""" - - system_fingerprint: Optional[str] = None - """This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when - backend changes have been made that might impact determinism. - """ - usage: Optional[Usage] = None """Usage statistics for the completion request.""" diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call.py deleted file mode 100644 index abc22e05..00000000 --- a/src/digitalocean_genai_sdk/types/chat/message_tool_call.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["MessageToolCall", "Function"] - - -class Function(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. 
- """ - - name: str - """The name of the function to call.""" - - -class MessageToolCall(BaseModel): - id: str - """The ID of the tool call.""" - - function: Function - """The function that the model called.""" - - type: Literal["function"] - """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py b/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py deleted file mode 100644 index da60f69a..00000000 --- a/src/digitalocean_genai_sdk/types/chat/message_tool_call_param.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["MessageToolCallParam", "Function"] - - -class Function(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" - - -class MessageToolCallParam(TypedDict, total=False): - id: Required[str] - """The ID of the tool call.""" - - function: Required[Function] - """The function that the model called.""" - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py b/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py deleted file mode 100644 index 497ba18c..00000000 --- a/src/digitalocean_genai_sdk/types/chat/model_ids_shared_param.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, TypeAlias - -__all__ = ["ModelIDsSharedParam"] - -ModelIDsSharedParam: TypeAlias = Union[ - str, - Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], -] diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py deleted file mode 100644 index 17ca162a..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseFormatJsonObject"] - - -class ResponseFormatJsonObject(BaseModel): - type: Literal["json_object"] - """The type of response format being defined. Always `json_object`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py deleted file mode 100644 index 5296cec4..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_json_object_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ResponseFormatJsonObjectParam"] - - -class ResponseFormatJsonObjectParam(TypedDict, total=False): - type: Required[Literal["json_object"]] - """The type of response format being defined. Always `json_object`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py deleted file mode 100644 index a65bf052..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from ..._models import BaseModel - -__all__ = ["ResponseFormatJsonSchema", "JsonSchema"] - - -class JsonSchema(BaseModel): - name: str - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. 
- """ - - description: Optional[str] = None - """ - A description of what the response format is for, used by the model to determine - how to respond in the format. - """ - - schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None) - """ - The schema for the response format, described as a JSON Schema object. Learn how - to build JSON schemas [here](https://json-schema.org/). - """ - - strict: Optional[bool] = None - """ - Whether to enable strict schema adherence when generating the output. If set to - true, the model will always follow the exact schema defined in the `schema` - field. Only a subset of JSON Schema is supported when `strict` is `true`. To - learn more, read the - [Structured Outputs guide](/docs/guides/structured-outputs). - """ - - -class ResponseFormatJsonSchema(BaseModel): - json_schema: JsonSchema - """Structured Outputs configuration options, including a JSON Schema.""" - - type: Literal["json_schema"] - """The type of response format being defined. Always `json_schema`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py deleted file mode 100644 index 32d254c3..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_json_schema_param.py +++ /dev/null @@ -1,46 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ResponseFormatJsonSchemaParam", "JsonSchema"] - - -class JsonSchema(TypedDict, total=False): - name: Required[str] - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - description: str - """ - A description of what the response format is for, used by the model to determine - how to respond in the format. - """ - - schema: Dict[str, object] - """ - The schema for the response format, described as a JSON Schema object. Learn how - to build JSON schemas [here](https://json-schema.org/). - """ - - strict: Optional[bool] - """ - Whether to enable strict schema adherence when generating the output. If set to - true, the model will always follow the exact schema defined in the `schema` - field. Only a subset of JSON Schema is supported when `strict` is `true`. To - learn more, read the - [Structured Outputs guide](/docs/guides/structured-outputs). - """ - - -class ResponseFormatJsonSchemaParam(TypedDict, total=False): - json_schema: Required[JsonSchema] - """Structured Outputs configuration options, including a JSON Schema.""" - - type: Required[Literal["json_schema"]] - """The type of response format being defined. Always `json_schema`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text.py b/src/digitalocean_genai_sdk/types/chat/response_format_text.py deleted file mode 100644 index f0c8cfb7..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_text.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseFormatText"] - - -class ResponseFormatText(BaseModel): - type: Literal["text"] - """The type of response format being defined. 
Always `text`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py b/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py deleted file mode 100644 index 0d37573e..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_format_text_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ResponseFormatTextParam"] - - -class ResponseFormatTextParam(TypedDict, total=False): - type: Required[Literal["text"]] - """The type of response format being defined. Always `text`.""" diff --git a/src/digitalocean_genai_sdk/types/chat/response_message.py b/src/digitalocean_genai_sdk/types/chat/response_message.py index 940adf8f..22e81c9b 100644 --- a/src/digitalocean_genai_sdk/types/chat/response_message.py +++ b/src/digitalocean_genai_sdk/types/chat/response_message.py @@ -1,67 +1,11 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import Optional from typing_extensions import Literal from ..._models import BaseModel -from .message_tool_call import MessageToolCall -__all__ = ["ResponseMessage", "Annotation", "AnnotationURLCitation", "Audio", "FunctionCall"] - - -class AnnotationURLCitation(BaseModel): - end_index: int - """The index of the last character of the URL citation in the message.""" - - start_index: int - """The index of the first character of the URL citation in the message.""" - - title: str - """The title of the web resource.""" - - url: str - """The URL of the web resource.""" - - -class Annotation(BaseModel): - type: Literal["url_citation"] - """The type of the URL citation. Always `url_citation`.""" - - url_citation: AnnotationURLCitation - """A URL citation when using web search.""" - - -class Audio(BaseModel): - id: str - """Unique identifier for this audio response.""" - - data: str - """ - Base64 encoded audio bytes generated by the model, in the format specified in - the request. - """ - - expires_at: int - """ - The Unix timestamp (in seconds) for when this audio response will no longer be - accessible on the server for use in multi-turn conversations. - """ - - transcript: str - """Transcript of the audio generated by the model.""" - - -class FunctionCall(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: str - """The name of the function to call.""" +__all__ = ["ResponseMessage"] class ResponseMessage(BaseModel): @@ -73,25 +17,3 @@ class ResponseMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" - - annotations: Optional[List[Annotation]] = None - """ - Annotations for the message, when applicable, as when using the - [web search tool](/docs/guides/tools-web-search?api-mode=chat). - """ - - audio: Optional[Audio] = None - """ - If the audio output modality is requested, this object contains data about the - audio response from the model. [Learn more](/docs/guides/audio). - """ - - function_call: Optional[FunctionCall] = None - """Deprecated and replaced by `tool_calls`. 
- - The name and arguments of a function that should be called, as generated by the - model. - """ - - tool_calls: Optional[List[MessageToolCall]] = None - """The tool calls generated by the model, such as function calls.""" diff --git a/src/digitalocean_genai_sdk/types/chat/usage.py b/src/digitalocean_genai_sdk/types/chat/usage.py index 1a7a1abf..a3785b9f 100644 --- a/src/digitalocean_genai_sdk/types/chat/usage.py +++ b/src/digitalocean_genai_sdk/types/chat/usage.py @@ -1,40 +1,8 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional - from ..._models import BaseModel -__all__ = ["Usage", "CompletionTokensDetails", "PromptTokensDetails"] - - -class CompletionTokensDetails(BaseModel): - accepted_prediction_tokens: Optional[int] = None - """ - When using Predicted Outputs, the number of tokens in the prediction that - appeared in the completion. - """ - - audio_tokens: Optional[int] = None - """Audio input tokens generated by the model.""" - - reasoning_tokens: Optional[int] = None - """Tokens generated by the model for reasoning.""" - - rejected_prediction_tokens: Optional[int] = None - """ - When using Predicted Outputs, the number of tokens in the prediction that did - not appear in the completion. However, like reasoning tokens, these tokens are - still counted in the total completion tokens for purposes of billing, output, - and context window limits. - """ - - -class PromptTokensDetails(BaseModel): - audio_tokens: Optional[int] = None - """Audio input tokens present in the prompt.""" - - cached_tokens: Optional[int] = None - """Cached tokens present in the prompt.""" +__all__ = ["Usage"] class Usage(BaseModel): @@ -46,9 +14,3 @@ class Usage(BaseModel): total_tokens: int """Total number of tokens used in the request (prompt + completion).""" - - completion_tokens_details: Optional[CompletionTokensDetails] = None - """Breakdown of tokens used in a completion.""" - - prompt_tokens_details: Optional[PromptTokensDetails] = None - """Breakdown of tokens used in the prompt.""" diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py b/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py deleted file mode 100644 index 18b284a9..00000000 --- a/src/digitalocean_genai_sdk/types/chat/web_search_context_size.py +++ /dev/null @@ -1,7 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal, TypeAlias - -__all__ = ["WebSearchContextSize"] - -WebSearchContextSize: TypeAlias = Literal["low", "medium", "high"] diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location.py b/src/digitalocean_genai_sdk/types/chat/web_search_location.py deleted file mode 100644 index 192c4efa..00000000 --- a/src/digitalocean_genai_sdk/types/chat/web_search_location.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["WebSearchLocation"] - - -class WebSearchLocation(BaseModel): - city: Optional[str] = None - """Free text input for the city of the user, e.g. `San Francisco`.""" - - country: Optional[str] = None - """ - The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - the user, e.g. `US`. - """ - - region: Optional[str] = None - """Free text input for the region of the user, e.g. 
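`Usage` is now just the three flat counters, so accounting reduces to plain sums. A sketch under that assumption (the module path follows the `from .chat.usage import Usage` import visible elsewhere in this patch; the helper itself is hypothetical):

```python
# Illustrative sketch, not part of this diff: aggregating the trimmed Usage,
# which now exposes exactly prompt_tokens, completion_tokens, total_tokens.
from typing import Iterable

from digitalocean_genai_sdk.types.chat.usage import Usage

def total_tokens_used(usages: Iterable[Usage]) -> int:
    return sum(u.total_tokens for u in usages)
```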
`California`.""" - - timezone: Optional[str] = None - """ - The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - user, e.g. `America/Los_Angeles`. - """ diff --git a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py b/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py deleted file mode 100644 index bc4d5a4c..00000000 --- a/src/digitalocean_genai_sdk/types/chat/web_search_location_param.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["WebSearchLocationParam"] - - -class WebSearchLocationParam(TypedDict, total=False): - city: str - """Free text input for the city of the user, e.g. `San Francisco`.""" - - country: str - """ - The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - the user, e.g. `US`. - """ - - region: str - """Free text input for the region of the user, e.g. `California`.""" - - timezone: str - """ - The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - user, e.g. `America/Los_Angeles`. - """ diff --git a/src/digitalocean_genai_sdk/types/comparison_filter.py b/src/digitalocean_genai_sdk/types/comparison_filter.py deleted file mode 100644 index 547aac28..00000000 --- a/src/digitalocean_genai_sdk/types/comparison_filter.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ComparisonFilter"] - - -class ComparisonFilter(BaseModel): - key: str - """The key to compare against the value.""" - - type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] - """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. - - - `eq`: equals - - `ne`: not equal - - `gt`: greater than - - `gte`: greater than or equal - - `lt`: less than - - `lte`: less than or equal - """ - - value: Union[str, float, bool] - """ - The value to compare against the attribute key; supports string, number, or - boolean types. - """ diff --git a/src/digitalocean_genai_sdk/types/comparison_filter_param.py b/src/digitalocean_genai_sdk/types/comparison_filter_param.py deleted file mode 100644 index 2df2d744..00000000 --- a/src/digitalocean_genai_sdk/types/comparison_filter_param.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ComparisonFilterParam"] - - -class ComparisonFilterParam(TypedDict, total=False): - key: Required[str] - """The key to compare against the value.""" - - type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]] - """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. - - - `eq`: equals - - `ne`: not equal - - `gt`: greater than - - `gte`: greater than or equal - - `lt`: less than - - `lte`: less than or equal - """ - - value: Required[Union[str, float, bool]] - """ - The value to compare against the attribute key; supports string, number, or - boolean types. 
- """ diff --git a/src/digitalocean_genai_sdk/types/completion_create_params.py b/src/digitalocean_genai_sdk/types/completion_create_params.py deleted file mode 100644 index 36709c57..00000000 --- a/src/digitalocean_genai_sdk/types/completion_create_params.py +++ /dev/null @@ -1,168 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict - -from .stop_configuration_param import StopConfigurationParam -from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam - -__all__ = ["CompletionCreateParams"] - - -class CompletionCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]] - """ID of the model to use. - - You can use the [List models](/docs/api-reference/models/list) API to see all of - your available models, or see our [Model overview](/docs/models) for - descriptions of them. - """ - - prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] - """ - The prompt(s) to generate completions for, encoded as a string, array of - strings, array of tokens, or array of token arrays. - - Note that <|endoftext|> is the document separator that the model sees during - training, so if a prompt is not specified the model will generate as if from the - beginning of a new document. - """ - - best_of: Optional[int] - """ - Generates `best_of` completions server-side and returns the "best" (the one with - the highest log probability per token). Results cannot be streamed. - - When used with `n`, `best_of` controls the number of candidate completions and - `n` specifies how many to return – `best_of` must be greater than `n`. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - """ - - echo: Optional[bool] - """Echo back the prompt in addition to the completion""" - - frequency_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on their existing frequency in the - text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - """ - - logit_bias: Optional[Dict[str, int]] - """Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the GPT - tokenizer) to an associated bias value from -100 to 100. You can use this - [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. - Mathematically, the bias is added to the logits generated by the model prior to - sampling. The exact effect will vary per model, but values between -1 and 1 - should decrease or increase likelihood of selection; values like -100 or 100 - should result in a ban or exclusive selection of the relevant token. - - As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - from being generated. - """ - - logprobs: Optional[int] - """ - Include the log probabilities on the `logprobs` most likely output tokens, as - well the chosen tokens. For example, if `logprobs` is 5, the API will return a - list of the 5 most likely tokens. 
The API will always return the `logprob` of - the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - """ - - max_tokens: Optional[int] - """ - The maximum number of [tokens](/tokenizer) that can be generated in the - completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. - """ - - n: Optional[int] - """How many completions to generate for each prompt. - - **Note:** Because this parameter generates many completions, it can quickly - consume your token quota. Use carefully and ensure that you have reasonable - settings for `max_tokens` and `stop`. - """ - - presence_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on whether they appear in the text so - far, increasing the model's likelihood to talk about new topics. - - [See more information about frequency and presence penalties.](/docs/guides/text-generation) - """ - - seed: Optional[int] - """ - If specified, our system will make a best effort to sample deterministically, - such that repeated requests with the same `seed` and parameters should return - the same result. - - Determinism is not guaranteed, and you should refer to the `system_fingerprint` - response parameter to monitor changes in the backend. - """ - - stop: Optional[StopConfigurationParam] - """Up to 4 sequences where the API will stop generating further tokens. - - The returned text will not contain the stop sequence. - """ - - stream: Optional[bool] - """Whether to stream back partial progress. - - If set, tokens will be sent as data-only - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - as they become available, with the stream terminated by a `data: [DONE]` - message. - [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - """ - - stream_options: Optional[ChatCompletionStreamOptionsParam] - """Options for streaming response. Only set this when you set `stream: true`.""" - - suffix: Optional[str] - """The suffix that comes after a completion of inserted text. - - This parameter is only supported for `gpt-3.5-turbo-instruct`. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - """ - - user: str - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
- """ diff --git a/src/digitalocean_genai_sdk/types/completion_create_response.py b/src/digitalocean_genai_sdk/types/completion_create_response.py deleted file mode 100644 index 2e1028bf..00000000 --- a/src/digitalocean_genai_sdk/types/completion_create_response.py +++ /dev/null @@ -1,63 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .chat.usage import Usage - -__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs"] - - -class ChoiceLogprobs(BaseModel): - text_offset: Optional[List[int]] = None - - token_logprobs: Optional[List[float]] = None - - tokens: Optional[List[str]] = None - - top_logprobs: Optional[List[Dict[str, float]]] = None - - -class Choice(BaseModel): - finish_reason: Literal["stop", "length", "content_filter"] - """The reason the model stopped generating tokens. - - This will be `stop` if the model hit a natural stop point or a provided stop - sequence, `length` if the maximum number of tokens specified in the request was - reached, or `content_filter` if content was omitted due to a flag from our - content filters. - """ - - index: int - - logprobs: Optional[ChoiceLogprobs] = None - - text: str - - -class CompletionCreateResponse(BaseModel): - id: str - """A unique identifier for the completion.""" - - choices: List[Choice] - """The list of completion choices the model generated for the input prompt.""" - - created: int - """The Unix timestamp (in seconds) of when the completion was created.""" - - model: str - """The model used for completion.""" - - object: Literal["text_completion"] - """The object type, which is always "text_completion" """ - - system_fingerprint: Optional[str] = None - """This fingerprint represents the backend configuration that the model runs with. - - Can be used in conjunction with the `seed` request parameter to understand when - backend changes have been made that might impact determinism. - """ - - usage: Optional[Usage] = None - """Usage statistics for the completion request.""" diff --git a/src/digitalocean_genai_sdk/types/compound_filter.py b/src/digitalocean_genai_sdk/types/compound_filter.py deleted file mode 100644 index bf1f793f..00000000 --- a/src/digitalocean_genai_sdk/types/compound_filter.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .comparison_filter import ComparisonFilter - -__all__ = ["CompoundFilter", "Filter"] - -Filter: TypeAlias = Union[ComparisonFilter, Dict[str, object]] - - -class CompoundFilter(BaseModel): - filters: List[Filter] - """Array of filters to combine. - - Items can be `ComparisonFilter` or `CompoundFilter`. - """ - - type: Literal["and", "or"] - """Type of operation: `and` or `or`.""" diff --git a/src/digitalocean_genai_sdk/types/compound_filter_param.py b/src/digitalocean_genai_sdk/types/compound_filter_param.py deleted file mode 100644 index 1f66a965..00000000 --- a/src/digitalocean_genai_sdk/types/compound_filter_param.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .comparison_filter_param import ComparisonFilterParam - -__all__ = ["CompoundFilterParam", "Filter"] - -Filter: TypeAlias = Union[ComparisonFilterParam, Dict[str, object]] - - -class CompoundFilterParam(TypedDict, total=False): - filters: Required[Iterable[Filter]] - """Array of filters to combine. - - Items can be `ComparisonFilter` or `CompoundFilter`. - """ - - type: Required[Literal["and", "or"]] - """Type of operation: `and` or `or`.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call.py b/src/digitalocean_genai_sdk/types/computer_tool_call.py deleted file mode 100644 index b127e694..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call.py +++ /dev/null @@ -1,198 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel -from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck - -__all__ = [ - "ComputerToolCall", - "Action", - "ActionClick", - "ActionDoubleClick", - "ActionDrag", - "ActionDragPath", - "ActionKeyPress", - "ActionMove", - "ActionScreenshot", - "ActionScroll", - "ActionType", - "ActionWait", -] - - -class ActionClick(BaseModel): - button: Literal["left", "right", "wheel", "back", "forward"] - """Indicates which mouse button was pressed during the click. - - One of `left`, `right`, `wheel`, `back`, or `forward`. - """ - - type: Literal["click"] - """Specifies the event type. - - For a click action, this property is always set to `click`. - """ - - x: int - """The x-coordinate where the click occurred.""" - - y: int - """The y-coordinate where the click occurred.""" - - -class ActionDoubleClick(BaseModel): - type: Literal["double_click"] - """Specifies the event type. - - For a double click action, this property is always set to `double_click`. - """ - - x: int - """The x-coordinate where the double click occurred.""" - - y: int - """The y-coordinate where the double click occurred.""" - - -class ActionDragPath(BaseModel): - x: int - """The x-coordinate.""" - - y: int - """The y-coordinate.""" - - -class ActionDrag(BaseModel): - path: List[ActionDragPath] - """An array of coordinates representing the path of the drag action. - - Coordinates will appear as an array of objects, eg - - ``` - [ - - { x: 100, y: 200 }, - { x: 200, y: 300 } - ] - ``` - """ - - type: Literal["drag"] - """Specifies the event type. - - For a drag action, this property is always set to `drag`. - """ - - -class ActionKeyPress(BaseModel): - keys: List[str] - """The combination of keys the model is requesting to be pressed. - - This is an array of strings, each representing a key. - """ - - type: Literal["keypress"] - """Specifies the event type. - - For a keypress action, this property is always set to `keypress`. - """ - - -class ActionMove(BaseModel): - type: Literal["move"] - """Specifies the event type. - - For a move action, this property is always set to `move`. - """ - - x: int - """The x-coordinate to move to.""" - - y: int - """The y-coordinate to move to.""" - - -class ActionScreenshot(BaseModel): - type: Literal["screenshot"] - """Specifies the event type. - - For a screenshot action, this property is always set to `screenshot`. 
- """ - - -class ActionScroll(BaseModel): - scroll_x: int - """The horizontal scroll distance.""" - - scroll_y: int - """The vertical scroll distance.""" - - type: Literal["scroll"] - """Specifies the event type. - - For a scroll action, this property is always set to `scroll`. - """ - - x: int - """The x-coordinate where the scroll occurred.""" - - y: int - """The y-coordinate where the scroll occurred.""" - - -class ActionType(BaseModel): - text: str - """The text to type.""" - - type: Literal["type"] - """Specifies the event type. - - For a type action, this property is always set to `type`. - """ - - -class ActionWait(BaseModel): - type: Literal["wait"] - """Specifies the event type. - - For a wait action, this property is always set to `wait`. - """ - - -Action: TypeAlias = Union[ - ActionClick, - ActionDoubleClick, - ActionDrag, - ActionKeyPress, - ActionMove, - ActionScreenshot, - ActionScroll, - ActionType, - ActionWait, -] - - -class ComputerToolCall(BaseModel): - id: str - """The unique ID of the computer call.""" - - action: Action - """A click action.""" - - call_id: str - """An identifier used when responding to the tool call with output.""" - - pending_safety_checks: List[ComputerToolCallSafetyCheck] - """The pending safety checks for the computer call.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - type: Literal["computer_call"] - """The type of the computer call. Always `computer_call`.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output.py deleted file mode 100644 index 0133a29a..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call_output.py +++ /dev/null @@ -1,50 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .computer_tool_call_safety_check import ComputerToolCallSafetyCheck - -__all__ = ["ComputerToolCallOutput", "Output"] - - -class Output(BaseModel): - type: Literal["computer_screenshot"] - """Specifies the event type. - - For a computer screenshot, this property is always set to `computer_screenshot`. - """ - - file_id: Optional[str] = None - """The identifier of an uploaded file that contains the screenshot.""" - - image_url: Optional[str] = None - """The URL of the screenshot image.""" - - -class ComputerToolCallOutput(BaseModel): - call_id: str - """The ID of the computer tool call that produced the output.""" - - output: Output - """A computer screenshot image used with the computer use tool.""" - - type: Literal["computer_call_output"] - """The type of the computer tool call output. Always `computer_call_output`.""" - - id: Optional[str] = None - """The ID of the computer tool call output.""" - - acknowledged_safety_checks: Optional[List[ComputerToolCallSafetyCheck]] = None - """ - The safety checks reported by the API that have been acknowledged by the - developer. - """ - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the message input. - - One of `in_progress`, `completed`, or `incomplete`. Populated when input items - are returned via API. 
- """ diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py deleted file mode 100644 index 764c4da8..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call_output_param.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Literal, Required, TypedDict - -from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam - -__all__ = ["ComputerToolCallOutputParam", "Output"] - - -class Output(TypedDict, total=False): - type: Required[Literal["computer_screenshot"]] - """Specifies the event type. - - For a computer screenshot, this property is always set to `computer_screenshot`. - """ - - file_id: str - """The identifier of an uploaded file that contains the screenshot.""" - - image_url: str - """The URL of the screenshot image.""" - - -class ComputerToolCallOutputParam(TypedDict, total=False): - call_id: Required[str] - """The ID of the computer tool call that produced the output.""" - - output: Required[Output] - """A computer screenshot image used with the computer use tool.""" - - type: Required[Literal["computer_call_output"]] - """The type of the computer tool call output. Always `computer_call_output`.""" - - id: str - """The ID of the computer tool call output.""" - - acknowledged_safety_checks: Iterable[ComputerToolCallSafetyCheckParam] - """ - The safety checks reported by the API that have been acknowledged by the - developer. - """ - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the message input. - - One of `in_progress`, `completed`, or `incomplete`. Populated when input items - are returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_param.py deleted file mode 100644 index 7fb87bfa..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call_param.py +++ /dev/null @@ -1,199 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .computer_tool_call_safety_check_param import ComputerToolCallSafetyCheckParam - -__all__ = [ - "ComputerToolCallParam", - "Action", - "ActionClick", - "ActionDoubleClick", - "ActionDrag", - "ActionDragPath", - "ActionKeyPress", - "ActionMove", - "ActionScreenshot", - "ActionScroll", - "ActionType", - "ActionWait", -] - - -class ActionClick(TypedDict, total=False): - button: Required[Literal["left", "right", "wheel", "back", "forward"]] - """Indicates which mouse button was pressed during the click. - - One of `left`, `right`, `wheel`, `back`, or `forward`. - """ - - type: Required[Literal["click"]] - """Specifies the event type. - - For a click action, this property is always set to `click`. - """ - - x: Required[int] - """The x-coordinate where the click occurred.""" - - y: Required[int] - """The y-coordinate where the click occurred.""" - - -class ActionDoubleClick(TypedDict, total=False): - type: Required[Literal["double_click"]] - """Specifies the event type. - - For a double click action, this property is always set to `double_click`. 
- """ - - x: Required[int] - """The x-coordinate where the double click occurred.""" - - y: Required[int] - """The y-coordinate where the double click occurred.""" - - -class ActionDragPath(TypedDict, total=False): - x: Required[int] - """The x-coordinate.""" - - y: Required[int] - """The y-coordinate.""" - - -class ActionDrag(TypedDict, total=False): - path: Required[Iterable[ActionDragPath]] - """An array of coordinates representing the path of the drag action. - - Coordinates will appear as an array of objects, eg - - ``` - [ - - { x: 100, y: 200 }, - { x: 200, y: 300 } - ] - ``` - """ - - type: Required[Literal["drag"]] - """Specifies the event type. - - For a drag action, this property is always set to `drag`. - """ - - -class ActionKeyPress(TypedDict, total=False): - keys: Required[List[str]] - """The combination of keys the model is requesting to be pressed. - - This is an array of strings, each representing a key. - """ - - type: Required[Literal["keypress"]] - """Specifies the event type. - - For a keypress action, this property is always set to `keypress`. - """ - - -class ActionMove(TypedDict, total=False): - type: Required[Literal["move"]] - """Specifies the event type. - - For a move action, this property is always set to `move`. - """ - - x: Required[int] - """The x-coordinate to move to.""" - - y: Required[int] - """The y-coordinate to move to.""" - - -class ActionScreenshot(TypedDict, total=False): - type: Required[Literal["screenshot"]] - """Specifies the event type. - - For a screenshot action, this property is always set to `screenshot`. - """ - - -class ActionScroll(TypedDict, total=False): - scroll_x: Required[int] - """The horizontal scroll distance.""" - - scroll_y: Required[int] - """The vertical scroll distance.""" - - type: Required[Literal["scroll"]] - """Specifies the event type. - - For a scroll action, this property is always set to `scroll`. - """ - - x: Required[int] - """The x-coordinate where the scroll occurred.""" - - y: Required[int] - """The y-coordinate where the scroll occurred.""" - - -class ActionType(TypedDict, total=False): - text: Required[str] - """The text to type.""" - - type: Required[Literal["type"]] - """Specifies the event type. - - For a type action, this property is always set to `type`. - """ - - -class ActionWait(TypedDict, total=False): - type: Required[Literal["wait"]] - """Specifies the event type. - - For a wait action, this property is always set to `wait`. - """ - - -Action: TypeAlias = Union[ - ActionClick, - ActionDoubleClick, - ActionDrag, - ActionKeyPress, - ActionMove, - ActionScreenshot, - ActionScroll, - ActionType, - ActionWait, -] - - -class ComputerToolCallParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the computer call.""" - - action: Required[Action] - """A click action.""" - - call_id: Required[str] - """An identifier used when responding to the tool call with output.""" - - pending_safety_checks: Required[Iterable[ComputerToolCallSafetyCheckParam]] - """The pending safety checks for the computer call.""" - - status: Required[Literal["in_progress", "completed", "incomplete"]] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - type: Required[Literal["computer_call"]] - """The type of the computer call. 
Always `computer_call`.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py deleted file mode 100644 index e24b9f35..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .._models import BaseModel - -__all__ = ["ComputerToolCallSafetyCheck"] - - -class ComputerToolCallSafetyCheck(BaseModel): - id: str - """The ID of the pending safety check.""" - - code: str - """The type of the pending safety check.""" - - message: str - """Details about the pending safety check.""" diff --git a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py b/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py deleted file mode 100644 index 859d6b59..00000000 --- a/src/digitalocean_genai_sdk/types/computer_tool_call_safety_check_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["ComputerToolCallSafetyCheckParam"] - - -class ComputerToolCallSafetyCheckParam(TypedDict, total=False): - id: Required[str] - """The ID of the pending safety check.""" - - code: Required[str] - """The type of the pending safety check.""" - - message: Required[str] - """Details about the pending safety check.""" diff --git a/src/digitalocean_genai_sdk/types/create_thread_request_param.py b/src/digitalocean_genai_sdk/types/create_thread_request_param.py deleted file mode 100644 index 3a8f59b4..00000000 --- a/src/digitalocean_genai_sdk/types/create_thread_request_param.py +++ /dev/null @@ -1,130 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .threads.create_message_request_param import CreateMessageRequestParam - -__all__ = [ - "CreateThreadRequestParam", - "ToolResources", - "ToolResourcesCodeInterpreter", - "ToolResourcesFileSearch", - "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", -] - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. 
The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, - ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, -] - - -class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. - """ - - file_ids: List[str] - """A list of [file](/docs/api-reference/files) IDs to add to the vector store. - - There can be a maximum of 10000 files in a vector store. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - The [vector store](/docs/api-reference/vector-stores/object) attached to this - thread. There can be a maximum of 1 vector store attached to the thread. - """ - - vector_stores: Iterable[ToolResourcesFileSearchVectorStore] - """ - A helper to create a [vector store](/docs/api-reference/vector-stores/object) - with file_ids and attach it to this thread. There can be a maximum of 1 vector - store attached to the thread. - """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch - - -class CreateThreadRequestParam(TypedDict, total=False): - messages: Iterable[CreateMessageRequestParam] - """A list of [messages](/docs/api-reference/messages) to start the thread with.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - tool_resources: Optional[ToolResources] - """ - A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. 
- """ diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/digitalocean_genai_sdk/types/embedding_create_params.py index caf65415..d3e923ad 100644 --- a/src/digitalocean_genai_sdk/types/embedding_create_params.py +++ b/src/digitalocean_genai_sdk/types/embedding_create_params.py @@ -2,47 +2,27 @@ from __future__ import annotations -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypedDict +from typing import List, Union +from typing_extensions import Required, TypedDict __all__ = ["EmbeddingCreateParams"] class EmbeddingCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]] + input: Required[Union[str, List[str]]] """Input text to embed, encoded as a string or array of tokens. - To embed multiple inputs in a single request, pass an array of strings or array - of token arrays. The input must not exceed the max input tokens for the model - (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any - array must be 2048 dimensions or less. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. + To embed multiple inputs in a single request, pass an array of strings. """ - model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]] + model: Required[str] """ID of the model to use. - You can use the [List models](/docs/api-reference/models/list) API to see all of - your available models, or see our [Model overview](/docs/models) for - descriptions of them. - """ - - dimensions: int - """The number of dimensions the resulting output embeddings should have. - - Only supported in `text-embedding-3` and later models. - """ - - encoding_format: Literal["float", "base64"] - """The format to return the embeddings in. - - Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + You can use the List models API to see all of your available models. """ user: str """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. """ diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/digitalocean_genai_sdk/types/embedding_create_response.py index e85daaba..19c474fd 100644 --- a/src/digitalocean_genai_sdk/types/embedding_create_response.py +++ b/src/digitalocean_genai_sdk/types/embedding_create_response.py @@ -10,11 +10,7 @@ class Data(BaseModel): embedding: List[float] - """The embedding vector, which is a list of floats. - - The length of vector depends on the model as listed in the - [embedding guide](/docs/guides/embeddings). - """ + """The embedding vector, which is a list of floats.""" index: int """The index of the embedding in the list of embeddings.""" diff --git a/src/digitalocean_genai_sdk/types/file_delete_response.py b/src/digitalocean_genai_sdk/types/file_delete_response.py deleted file mode 100644 index 26e2e053..00000000 --- a/src/digitalocean_genai_sdk/types/file_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileDeleteResponse"]
-
-
-class FileDeleteResponse(BaseModel):
-    id: str
-
-    deleted: bool
-
-    object: Literal["file"]
diff --git a/src/digitalocean_genai_sdk/types/file_list_params.py b/src/digitalocean_genai_sdk/types/file_list_params.py
deleted file mode 100644
index 058d874c..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["FileListParams"]
-
-
-class FileListParams(TypedDict, total=False):
-    after: str
-    """A cursor for use in pagination.
-
-    `after` is an object ID that defines your place in the list. For instance, if
-    you make a list request and receive 100 objects, ending with obj_foo, your
-    subsequent call can include after=obj_foo in order to fetch the next page of the
-    list.
-    """
-
-    limit: int
-    """A limit on the number of objects to be returned.
-
-    Limit can range between 1 and 10,000, and the default is 10,000.
-    """
-
-    order: Literal["asc", "desc"]
-    """Sort order by the `created_at` timestamp of the objects.
-
-    `asc` for ascending order and `desc` for descending order.
-    """
-
-    purpose: str
-    """Only return files with the given purpose."""
diff --git a/src/digitalocean_genai_sdk/types/file_list_response.py b/src/digitalocean_genai_sdk/types/file_list_response.py
deleted file mode 100644
index db9ef641..00000000
--- a/src/digitalocean_genai_sdk/types/file_list_response.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .openai_file import OpenAIFile
-
-__all__ = ["FileListResponse"]
-
-
-class FileListResponse(BaseModel):
-    data: List[OpenAIFile]
-
-    first_id: str
-
-    has_more: bool
-
-    last_id: str
-
-    object: str
diff --git a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
deleted file mode 100644
index 20c945db..00000000
--- a/src/digitalocean_genai_sdk/types/file_retrieve_content_response.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import TypeAlias
-
-__all__ = ["FileRetrieveContentResponse"]
-
-FileRetrieveContentResponse: TypeAlias = str
diff --git a/src/digitalocean_genai_sdk/types/file_search_ranker.py b/src/digitalocean_genai_sdk/types/file_search_ranker.py
deleted file mode 100644
index d4aabe5a..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_ranker.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal, TypeAlias
-
-__all__ = ["FileSearchRanker"]
-
-FileSearchRanker: TypeAlias = Literal["auto", "default_2024_08_21"]
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call.py b/src/digitalocean_genai_sdk/types/file_search_tool_call.py
deleted file mode 100644
index 04542379..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FileSearchToolCall", "Result"]
-
-
-class Result(BaseModel):
-    attributes: Optional[Dict[str, Union[str, float, bool]]] = None
-    """Set of 16 key-value pairs that can be attached to an object.
-
-    This can be useful for storing additional information about the object in a
-    structured format, and querying for objects via API or the dashboard. Keys are
-    strings with a maximum length of 64 characters. Values are strings with a
-    maximum length of 512 characters, booleans, or numbers.
-    """
-
-    file_id: Optional[str] = None
-    """The unique ID of the file."""
-
-    filename: Optional[str] = None
-    """The name of the file."""
-
-    score: Optional[float] = None
-    """The relevance score of the file - a value between 0 and 1."""
-
-    text: Optional[str] = None
-    """The text that was retrieved from the file."""
-
-
-class FileSearchToolCall(BaseModel):
-    id: str
-    """The unique ID of the file search tool call."""
-
-    queries: List[str]
-    """The queries used to search for files."""
-
-    status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
-    """The status of the file search tool call.
-
-    One of `in_progress`, `searching`, `incomplete` or `failed`,
-    """
-
-    type: Literal["file_search_call"]
-    """The type of the file search tool call. Always `file_search_call`."""
-
-    results: Optional[List[Result]] = None
-    """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
deleted file mode 100644
index 315dc90e..00000000
--- a/src/digitalocean_genai_sdk/types/file_search_tool_call_param.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FileSearchToolCallParam", "Result"]
-
-
-class Result(TypedDict, total=False):
-    attributes: Optional[Dict[str, Union[str, float, bool]]]
-    """Set of 16 key-value pairs that can be attached to an object.
-
-    This can be useful for storing additional information about the object in a
-    structured format, and querying for objects via API or the dashboard. Keys are
-    strings with a maximum length of 64 characters. Values are strings with a
-    maximum length of 512 characters, booleans, or numbers.
-    """
-
-    file_id: str
-    """The unique ID of the file."""
-
-    filename: str
-    """The name of the file."""
-
-    score: float
-    """The relevance score of the file - a value between 0 and 1."""
-
-    text: str
-    """The text that was retrieved from the file."""
-
-
-class FileSearchToolCallParam(TypedDict, total=False):
-    id: Required[str]
-    """The unique ID of the file search tool call."""
-
-    queries: Required[List[str]]
-    """The queries used to search for files."""
-
-    status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
-    """The status of the file search tool call.
-
-    One of `in_progress`, `searching`, `incomplete` or `failed`,
-    """
-
-    type: Required[Literal["file_search_call"]]
-    """The type of the file search tool call. Always `file_search_call`."""
-
-    results: Optional[Iterable[Result]]
-    """The results of the file search tool call."""
diff --git a/src/digitalocean_genai_sdk/types/file_upload_params.py b/src/digitalocean_genai_sdk/types/file_upload_params.py
deleted file mode 100644
index 5b42fc50..00000000
--- a/src/digitalocean_genai_sdk/types/file_upload_params.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["FileUploadParams"]
-
-
-class FileUploadParams(TypedDict, total=False):
-    file: Required[FileTypes]
-    """The File object (not file name) to be uploaded."""
-
-    purpose: Required[Literal["assistants", "batch", "fine-tune", "vision", "user_data", "evals"]]
-    """The intended purpose of the uploaded file.
-
-    One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch
-    API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision
-    fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used
-    for eval data sets
-    """
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
index 6b7dcea7..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
+++ b/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py
@@ -1,10 +1,3 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
-
-from .fine_tuning_job import FineTuningJob as FineTuningJob
-from .job_list_params import JobListParams as JobListParams
-from .fine_tune_method import FineTuneMethod as FineTuneMethod
-from .job_create_params import JobCreateParams as JobCreateParams
-from .job_list_response import JobListResponse as JobListResponse
-from .fine_tune_method_param import FineTuneMethodParam as FineTuneMethodParam
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
index 6b30e048..f8ee8b14 100644
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
+++ b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py
@@ -1,10 +1,3 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
-
-from .permission_create_params import PermissionCreateParams as PermissionCreateParams
-from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse
-from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams
-from .list_fine_tuning_checkpoint_permission import (
-    ListFineTuningCheckpointPermission as ListFineTuningCheckpointPermission,
-)
diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
deleted file mode 100644
index 9136bf5d..00000000
--- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/list_fine_tuning_checkpoint_permission.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from typing import List, Optional -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ListFineTuningCheckpointPermission", "Data"] - - -class Data(BaseModel): - id: str - """The permission identifier, which can be referenced in the API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the permission was created.""" - - object: Literal["checkpoint.permission"] - """The object type, which is always "checkpoint.permission".""" - - project_id: str - """The project identifier that the permission is for.""" - - -class ListFineTuningCheckpointPermission(BaseModel): - data: List[Data] - - has_more: bool - - object: Literal["list"] - - first_id: Optional[str] = None - - last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py deleted file mode 100644 index 92f98f21..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Required, TypedDict - -__all__ = ["PermissionCreateParams"] - - -class PermissionCreateParams(TypedDict, total=False): - project_ids: Required[List[str]] - """The project identifiers to grant access to.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py deleted file mode 100644 index 1a92d912..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_delete_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["PermissionDeleteResponse"] - - -class PermissionDeleteResponse(BaseModel): - id: str - """The ID of the fine-tuned model checkpoint permission that was deleted.""" - - deleted: bool - """Whether the fine-tuned model checkpoint permission was successfully deleted.""" - - object: Literal["checkpoint.permission"] - """The object type, which is always "checkpoint.permission".""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py deleted file mode 100644 index 6e66a867..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/permission_retrieve_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["PermissionRetrieveParams"] - - -class PermissionRetrieveParams(TypedDict, total=False): - after: str - """Identifier for the last permission ID from the previous pagination request.""" - - limit: int - """Number of permissions to retrieve.""" - - order: Literal["ascending", "descending"] - """The order in which to retrieve permissions.""" - - project_id: str - """The ID of the project to get permissions for.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py deleted file mode 100644 index 6ad8f7a5..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method.py +++ /dev/null @@ -1,78 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["FineTuneMethod", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"] - - -class DpoHyperparameters(BaseModel): - batch_size: Union[Literal["auto"], int, None] = None - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - beta: Union[Literal["auto"], float, None] = None - """The beta value for the DPO method. - - A higher beta value will increase the weight of the penalty between the policy - and reference model. - """ - - learning_rate_multiplier: Union[Literal["auto"], float, None] = None - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int, None] = None - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class Dpo(BaseModel): - hyperparameters: Optional[DpoHyperparameters] = None - """The hyperparameters used for the fine-tuning job.""" - - -class SupervisedHyperparameters(BaseModel): - batch_size: Union[Literal["auto"], int, None] = None - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - learning_rate_multiplier: Union[Literal["auto"], float, None] = None - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int, None] = None - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class Supervised(BaseModel): - hyperparameters: Optional[SupervisedHyperparameters] = None - """The hyperparameters used for the fine-tuning job.""" - - -class FineTuneMethod(BaseModel): - dpo: Optional[Dpo] = None - """Configuration for the DPO fine-tuning method.""" - - supervised: Optional[Supervised] = None - """Configuration for the supervised fine-tuning method.""" - - type: Optional[Literal["supervised", "dpo"]] = None - """The type of method. 
Is either `supervised` or `dpo`.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py deleted file mode 100644 index e28abc93..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tune_method_param.py +++ /dev/null @@ -1,78 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, TypedDict - -__all__ = ["FineTuneMethodParam", "Dpo", "DpoHyperparameters", "Supervised", "SupervisedHyperparameters"] - - -class DpoHyperparameters(TypedDict, total=False): - batch_size: Union[Literal["auto"], int] - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - beta: Union[Literal["auto"], float] - """The beta value for the DPO method. - - A higher beta value will increase the weight of the penalty between the policy - and reference model. - """ - - learning_rate_multiplier: Union[Literal["auto"], float] - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int] - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class Dpo(TypedDict, total=False): - hyperparameters: DpoHyperparameters - """The hyperparameters used for the fine-tuning job.""" - - -class SupervisedHyperparameters(TypedDict, total=False): - batch_size: Union[Literal["auto"], int] - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - learning_rate_multiplier: Union[Literal["auto"], float] - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int] - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class Supervised(TypedDict, total=False): - hyperparameters: SupervisedHyperparameters - """The hyperparameters used for the fine-tuning job.""" - - -class FineTuneMethodParam(TypedDict, total=False): - dpo: Dpo - """Configuration for the DPO fine-tuning method.""" - - supervised: Supervised - """Configuration for the supervised fine-tuning method.""" - - type: Literal["supervised", "dpo"] - """The type of method. Is either `supervised` or `dpo`.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py b/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py deleted file mode 100644 index 29f387a1..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/fine_tuning_job.py +++ /dev/null @@ -1,182 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -from ..._models import BaseModel -from .fine_tune_method import FineTuneMethod - -__all__ = ["FineTuningJob", "Error", "Hyperparameters", "Integration", "IntegrationWandb"] - - -class Error(BaseModel): - code: str - """A machine-readable error code.""" - - message: str - """A human-readable error message.""" - - param: Optional[str] = None - """The parameter that was invalid, usually `training_file` or `validation_file`. 
- - This field will be null if the failure was not parameter-specific. - """ - - -class Hyperparameters(BaseModel): - batch_size: Union[Literal["auto"], int, None] = None - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - learning_rate_multiplier: Union[Literal["auto"], float, None] = None - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int, None] = None - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class IntegrationWandb(BaseModel): - project: str - """The name of the project that the new run will be created under.""" - - entity: Optional[str] = None - """The entity to use for the run. - - This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered - WandB API key is used. - """ - - name: Optional[str] = None - """A display name to set for the run. - - If not set, we will use the Job ID as the name. - """ - - tags: Optional[List[str]] = None - """A list of tags to be attached to the newly created run. - - These tags are passed through directly to WandB. Some default tags are generated - by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - """ - - -class Integration(BaseModel): - type: Literal["wandb"] - """The type of the integration being enabled for the fine-tuning job""" - - wandb: IntegrationWandb - """The settings for your integration with Weights and Biases. - - This payload specifies the project that metrics will be sent to. Optionally, you - can set an explicit display name for your run, add tags to your run, and set a - default entity (team, username, etc) to be associated with your run. - """ - - -class FineTuningJob(BaseModel): - id: str - """The object identifier, which can be referenced in the API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" - - error: Optional[Error] = None - """ - For fine-tuning jobs that have `failed`, this will contain more information on - the cause of the failure. - """ - - fine_tuned_model: Optional[str] = None - """The name of the fine-tuned model that is being created. - - The value will be null if the fine-tuning job is still running. - """ - - finished_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the fine-tuning job was finished. - - The value will be null if the fine-tuning job is still running. - """ - - hyperparameters: Hyperparameters - """The hyperparameters used for the fine-tuning job. - - This value will only be returned when running `supervised` jobs. - """ - - model: str - """The base model that is being fine-tuned.""" - - object: Literal["fine_tuning.job"] - """The object type, which is always "fine_tuning.job".""" - - organization_id: str - """The organization that owns the fine-tuning job.""" - - result_files: List[str] - """The compiled results file ID(s) for the fine-tuning job. - - You can retrieve the results with the - [Files API](/docs/api-reference/files/retrieve-contents). 
- """ - - seed: int - """The seed used for the fine-tuning job.""" - - status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"] - """ - The current status of the fine-tuning job, which can be either - `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - """ - - trained_tokens: Optional[int] = None - """The total number of billable tokens processed by this fine-tuning job. - - The value will be null if the fine-tuning job is still running. - """ - - training_file: str - """The file ID used for training. - - You can retrieve the training data with the - [Files API](/docs/api-reference/files/retrieve-contents). - """ - - validation_file: Optional[str] = None - """The file ID used for validation. - - You can retrieve the validation results with the - [Files API](/docs/api-reference/files/retrieve-contents). - """ - - estimated_finish: Optional[int] = None - """ - The Unix timestamp (in seconds) for when the fine-tuning job is estimated to - finish. The value will be null if the fine-tuning job is not running. - """ - - integrations: Optional[List[Integration]] = None - """A list of integrations to enable for this fine-tuning job.""" - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - method: Optional[FineTuneMethod] = None - """The method used for fine-tuning.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py deleted file mode 100644 index a538e659..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/job_create_params.py +++ /dev/null @@ -1,152 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict - -from .fine_tune_method_param import FineTuneMethodParam - -__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"] - - -class JobCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]] - """The name of the model to fine-tune. - - You can select one of the - [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - """ - - training_file: Required[str] - """The ID of an uploaded file that contains training data. - - See [upload file](/docs/api-reference/files/create) for how to upload a file. - - Your dataset must be formatted as a JSONL file. Additionally, you must upload - your file with the purpose `fine-tune`. - - The contents of the file should differ depending on if the model uses the - [chat](/docs/api-reference/fine-tuning/chat-input), - [completions](/docs/api-reference/fine-tuning/completions-input) format, or if - the fine-tuning method uses the - [preference](/docs/api-reference/fine-tuning/preference-input) format. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - """ - - hyperparameters: Hyperparameters - """ - The hyperparameters used for the fine-tuning job. 
This value is now deprecated - in favor of `method`, and should be passed in under the `method` parameter. - """ - - integrations: Optional[Iterable[Integration]] - """A list of integrations to enable for your fine-tuning job.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - method: FineTuneMethodParam - """The method used for fine-tuning.""" - - seed: Optional[int] - """The seed controls the reproducibility of the job. - - Passing in the same seed and job parameters should produce the same results, but - may differ in rare cases. If a seed is not specified, one will be generated for - you. - """ - - suffix: Optional[str] - """ - A string of up to 64 characters that will be added to your fine-tuned model - name. - - For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. - """ - - validation_file: Optional[str] - """The ID of an uploaded file that contains validation data. - - If you provide this file, the data is used to generate validation metrics - periodically during fine-tuning. These metrics can be viewed in the fine-tuning - results file. The same data should not be present in both train and validation - files. - - Your dataset must be formatted as a JSONL file. You must upload your file with - the purpose `fine-tune`. - - See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. - """ - - -class Hyperparameters(TypedDict, total=False): - batch_size: Union[Literal["auto"], int] - """Number of examples in each batch. - - A larger batch size means that model parameters are updated less frequently, but - with lower variance. - """ - - learning_rate_multiplier: Union[Literal["auto"], float] - """Scaling factor for the learning rate. - - A smaller learning rate may be useful to avoid overfitting. - """ - - n_epochs: Union[Literal["auto"], int] - """The number of epochs to train the model for. - - An epoch refers to one full cycle through the training dataset. - """ - - -class IntegrationWandb(TypedDict, total=False): - project: Required[str] - """The name of the project that the new run will be created under.""" - - entity: Optional[str] - """The entity to use for the run. - - This allows you to set the team or username of the WandB user that you would - like associated with the run. If not set, the default entity for the registered - WandB API key is used. - """ - - name: Optional[str] - """A display name to set for the run. - - If not set, we will use the Job ID as the name. - """ - - tags: List[str] - """A list of tags to be attached to the newly created run. - - These tags are passed through directly to WandB. Some default tags are generated - by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". - """ - - -class Integration(TypedDict, total=False): - type: Required[Literal["wandb"]] - """The type of integration to enable. - - Currently, only "wandb" (Weights and Biases) is supported. - """ - - wandb: Required[IntegrationWandb] - """The settings for your integration with Weights and Biases. - - This payload specifies the project that metrics will be sent to. 
Optionally, you - can set an explicit display name for your run, add tags to your run, and set a - default entity (team, username, etc) to be associated with your run. - """ diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py deleted file mode 100644 index b79f3ce8..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_params.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import TypedDict - -__all__ = ["JobListParams"] - - -class JobListParams(TypedDict, total=False): - after: str - """Identifier for the last job from the previous pagination request.""" - - limit: int - """Number of fine-tuning jobs to retrieve.""" - - metadata: Optional[Dict[str, str]] - """Optional metadata filter. - - To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to - indicate no metadata. - """ diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py deleted file mode 100644 index ea6eb6a8..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/job_list_response.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from ..._models import BaseModel -from .fine_tuning_job import FineTuningJob - -__all__ = ["JobListResponse"] - - -class JobListResponse(BaseModel): - data: List[FineTuningJob] - - has_more: bool - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py index 9ba11022..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py +++ b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py @@ -1,8 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations - -from .event_retrieve_params import EventRetrieveParams as EventRetrieveParams -from .event_retrieve_response import EventRetrieveResponse as EventRetrieveResponse -from .checkpoint_retrieve_params import CheckpointRetrieveParams as CheckpointRetrieveParams -from .checkpoint_retrieve_response import CheckpointRetrieveResponse as CheckpointRetrieveResponse diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py deleted file mode 100644 index 34666a9f..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["CheckpointRetrieveParams"] - - -class CheckpointRetrieveParams(TypedDict, total=False): - after: str - """Identifier for the last checkpoint ID from the previous pagination request.""" - - limit: int - """Number of checkpoints to retrieve.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py deleted file mode 100644 index bf0af44d..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/checkpoint_retrieve_response.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["CheckpointRetrieveResponse", "Data", "DataMetrics"] - - -class DataMetrics(BaseModel): - full_valid_loss: Optional[float] = None - - full_valid_mean_token_accuracy: Optional[float] = None - - step: Optional[float] = None - - train_loss: Optional[float] = None - - train_mean_token_accuracy: Optional[float] = None - - valid_loss: Optional[float] = None - - valid_mean_token_accuracy: Optional[float] = None - - -class Data(BaseModel): - id: str - """The checkpoint identifier, which can be referenced in the API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the checkpoint was created.""" - - fine_tuned_model_checkpoint: str - """The name of the fine-tuned checkpoint model that is created.""" - - fine_tuning_job_id: str - """The name of the fine-tuning job that this checkpoint was created from.""" - - metrics: DataMetrics - """Metrics at the step number during the fine-tuning job.""" - - object: Literal["fine_tuning.job.checkpoint"] - """The object type, which is always "fine_tuning.job.checkpoint".""" - - step_number: int - """The step number that the checkpoint was created at.""" - - -class CheckpointRetrieveResponse(BaseModel): - data: List[Data] - - has_more: bool - - object: Literal["list"] - - first_id: Optional[str] = None - - last_id: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py deleted file mode 100644 index f0162e0e..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["EventRetrieveParams"] - - -class EventRetrieveParams(TypedDict, total=False): - after: str - """Identifier for the last event from the previous pagination request.""" - - limit: int - """Number of events to retrieve.""" diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py b/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py deleted file mode 100644 index 8c22fe30..00000000 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/event_retrieve_response.py +++ /dev/null @@ -1,40 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import builtins -from typing import List, Optional -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["EventRetrieveResponse", "Data"] - - -class Data(BaseModel): - id: str - """The object identifier.""" - - created_at: int - """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" - - level: Literal["info", "warn", "error"] - """The log level of the event.""" - - message: str - """The message of the event.""" - - object: Literal["fine_tuning.job.event"] - """The object type, which is always "fine_tuning.job.event".""" - - data: Optional[builtins.object] = None - """The data associated with the event.""" - - type: Optional[Literal["message", "metrics"]] = None - """The type of event.""" - - -class EventRetrieveResponse(BaseModel): - data: List[Data] - - has_more: bool - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/function_object.py b/src/digitalocean_genai_sdk/types/function_object.py deleted file mode 100644 index 4fe27f86..00000000 --- a/src/digitalocean_genai_sdk/types/function_object.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional - -from .._models import BaseModel - -__all__ = ["FunctionObject"] - - -class FunctionObject(BaseModel): - name: str - """The name of the function to be called. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - description: Optional[str] = None - """ - A description of what the function does, used by the model to choose when and - how to call the function. - """ - - parameters: Optional[Dict[str, object]] = None - """The parameters the function accepts, described as a JSON Schema object. - - See the [guide](/docs/guides/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. - - Omitting `parameters` defines a function with an empty parameter list. - """ - - strict: Optional[bool] = None - """Whether to enable strict schema adherence when generating the function call. - - If set to true, the model will follow the exact schema defined in the - `parameters` field. Only a subset of JSON Schema is supported when `strict` is - `true`. Learn more about Structured Outputs in the - [function calling guide](/docs/guides/function-calling). - """ diff --git a/src/digitalocean_genai_sdk/types/function_object_param.py b/src/digitalocean_genai_sdk/types/function_object_param.py deleted file mode 100644 index 1a358408..00000000 --- a/src/digitalocean_genai_sdk/types/function_object_param.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["FunctionObjectParam"] - - -class FunctionObjectParam(TypedDict, total=False): - name: Required[str] - """The name of the function to be called. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - description: str - """ - A description of what the function does, used by the model to choose when and - how to call the function. - """ - - parameters: Dict[str, object] - """The parameters the function accepts, described as a JSON Schema object.
- - See the [guide](/docs/guides/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. - - Omitting `parameters` defines a function with an empty parameter list. - """ - - strict: Optional[bool] - """Whether to enable strict schema adherence when generating the function call. - - If set to true, the model will follow the exact schema defined in the - `parameters` field. Only a subset of JSON Schema is supported when `strict` is - `true`. Learn more about Structured Outputs in the - [function calling guide](/docs/guides/function-calling). - """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call.py b/src/digitalocean_genai_sdk/types/function_tool_call.py deleted file mode 100644 index ecdb4a02..00000000 --- a/src/digitalocean_genai_sdk/types/function_tool_call.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["FunctionToolCall"] - - -class FunctionToolCall(BaseModel): - arguments: str - """A JSON string of the arguments to pass to the function.""" - - call_id: str - """The unique ID of the function tool call generated by the model.""" - - name: str - """The name of the function to run.""" - - type: Literal["function_call"] - """The type of the function tool call. Always `function_call`.""" - - id: Optional[str] = None - """The unique ID of the function tool call.""" - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output.py b/src/digitalocean_genai_sdk/types/function_tool_call_output.py deleted file mode 100644 index 4cbe27ce..00000000 --- a/src/digitalocean_genai_sdk/types/function_tool_call_output.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["FunctionToolCallOutput"] - - -class FunctionToolCallOutput(BaseModel): - call_id: str - """The unique ID of the function tool call generated by the model.""" - - output: str - """A JSON string of the output of the function tool call.""" - - type: Literal["function_call_output"] - """The type of the function tool call output. Always `function_call_output`.""" - - id: Optional[str] = None - """The unique ID of the function tool call output. - - Populated when this item is returned via API. - """ - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py deleted file mode 100644 index 49a573ed..00000000 --- a/src/digitalocean_genai_sdk/types/function_tool_call_output_param.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
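# Illustrative sketch (not part of the patch): a FunctionObjectParam-shaped
# definition. `parameters` is a plain JSON Schema object, and `strict=True`
# asks the model to follow that schema exactly (only a subset of JSON Schema
# is supported then). The function name and schema are invented for
# illustration.
get_weather_tool = {
    "name": "get_weather",
    "description": "Look up the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
        "additionalProperties": False,
    },
    "strict": True,
}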
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["FunctionToolCallOutputParam"] - - -class FunctionToolCallOutputParam(TypedDict, total=False): - call_id: Required[str] - """The unique ID of the function tool call generated by the model.""" - - output: Required[str] - """A JSON string of the output of the function tool call.""" - - type: Required[Literal["function_call_output"]] - """The type of the function tool call output. Always `function_call_output`.""" - - id: str - """The unique ID of the function tool call output. - - Populated when this item is returned via API. - """ - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/function_tool_call_param.py b/src/digitalocean_genai_sdk/types/function_tool_call_param.py deleted file mode 100644 index 91e076b6..00000000 --- a/src/digitalocean_genai_sdk/types/function_tool_call_param.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["FunctionToolCallParam"] - - -class FunctionToolCallParam(TypedDict, total=False): - arguments: Required[str] - """A JSON string of the arguments to pass to the function.""" - - call_id: Required[str] - """The unique ID of the function tool call generated by the model.""" - - name: Required[str] - """The name of the function to run.""" - - type: Required[Literal["function_call"]] - """The type of the function tool call. Always `function_call`.""" - - id: str - """The unique ID of the function tool call.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/image_create_edit_params.py b/src/digitalocean_genai_sdk/types/image_create_edit_params.py deleted file mode 100644 index f84f5642..00000000 --- a/src/digitalocean_genai_sdk/types/image_create_edit_params.py +++ /dev/null @@ -1,60 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Optional -from typing_extensions import Literal, Required, TypedDict - -from .._types import FileTypes - -__all__ = ["ImageCreateEditParams"] - - -class ImageCreateEditParams(TypedDict, total=False): - image: Required[FileTypes] - """The image to edit. - - Must be a valid PNG file, less than 4MB, and square. If mask is not provided, - image must have transparency, which will be used as the mask. - """ - - prompt: Required[str] - """A text description of the desired image(s). - - The maximum length is 1000 characters. - """ - - mask: FileTypes - """An additional image whose fully transparent areas (e.g. - - where alpha is zero) indicate where `image` should be edited. Must be a valid - PNG file, less than 4MB, and have the same dimensions as `image`. - """ - - model: Union[str, Literal["dall-e-2"], None] - """The model to use for image generation. - - Only `dall-e-2` is supported at this time. - """ - - n: Optional[int] - """The number of images to generate. 
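# Illustrative sketch (not part of the patch): the round trip implied by
# FunctionToolCallParam and FunctionToolCallOutputParam above. The model emits
# a `function_call` item whose `arguments` field is a JSON string; the caller
# runs the function and answers with a `function_call_output` item keyed by
# the same `call_id`. `run_tool` is a hypothetical dispatcher, not part of
# the SDK.
import json

def answer_tool_call(tool_call: dict, run_tool) -> dict:
    args = json.loads(tool_call["arguments"])  # arguments arrive as a JSON string
    result = run_tool(tool_call["name"], args)
    return {
        "type": "function_call_output",
        "call_id": tool_call["call_id"],  # must match the originating call
        "output": json.dumps(result),  # output goes back as a JSON string
    }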
Must be between 1 and 10.""" - - response_format: Optional[Literal["url", "b64_json"]] - """The format in which the generated images are returned. - - Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the - image has been generated. - """ - - size: Optional[Literal["256x256", "512x512", "1024x1024"]] - """The size of the generated images. - - Must be one of `256x256`, `512x512`, or `1024x1024`. - """ - - user: str - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ diff --git a/src/digitalocean_genai_sdk/types/image_create_generation_params.py b/src/digitalocean_genai_sdk/types/image_create_generation_params.py deleted file mode 100644 index e8cfbb18..00000000 --- a/src/digitalocean_genai_sdk/types/image_create_generation_params.py +++ /dev/null @@ -1,62 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Optional -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ImageCreateGenerationParams"] - - -class ImageCreateGenerationParams(TypedDict, total=False): - prompt: Required[str] - """A text description of the desired image(s). - - The maximum length is 1000 characters for `dall-e-2` and 4000 characters for - `dall-e-3`. - """ - - model: Union[str, Literal["dall-e-2", "dall-e-3"], None] - """The model to use for image generation.""" - - n: Optional[int] - """The number of images to generate. - - Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - """ - - quality: Literal["standard", "hd"] - """The quality of the image that will be generated. - - `hd` creates images with finer details and greater consistency across the image. - This param is only supported for `dall-e-3`. - """ - - response_format: Optional[Literal["url", "b64_json"]] - """The format in which the generated images are returned. - - Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the - image has been generated. - """ - - size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] - """The size of the generated images. - - Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one - of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. - """ - - style: Optional[Literal["vivid", "natural"]] - """The style of the generated images. - - Must be one of `vivid` or `natural`. Vivid causes the model to lean towards - generating hyper-real and dramatic images. Natural causes the model to produce - more natural, less hyper-real looking images. This param is only supported for - `dall-e-3`. - """ - - user: str - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ diff --git a/src/digitalocean_genai_sdk/types/image_create_variation_params.py b/src/digitalocean_genai_sdk/types/image_create_variation_params.py deleted file mode 100644 index 64245a05..00000000 --- a/src/digitalocean_genai_sdk/types/image_create_variation_params.py +++ /dev/null @@ -1,49 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
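# Illustrative sketch (not part of the patch): an
# ImageCreateGenerationParams-shaped request. Per the docstrings above,
# `quality` and the wide/tall sizes only apply to `dall-e-3`, which also
# requires `n=1`. The prompt is invented.
generation_params = {
    "prompt": "A watercolor lighthouse at dusk",
    "model": "dall-e-3",
    "n": 1,
    "quality": "hd",
    "size": "1792x1024",
    "response_format": "b64_json",
}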
- -from __future__ import annotations - -from typing import Union, Optional -from typing_extensions import Literal, Required, TypedDict - -from .._types import FileTypes - -__all__ = ["ImageCreateVariationParams"] - - -class ImageCreateVariationParams(TypedDict, total=False): - image: Required[FileTypes] - """The image to use as the basis for the variation(s). - - Must be a valid PNG file, less than 4MB, and square. - """ - - model: Union[str, Literal["dall-e-2"], None] - """The model to use for image generation. - - Only `dall-e-2` is supported at this time. - """ - - n: Optional[int] - """The number of images to generate. - - Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. - """ - - response_format: Optional[Literal["url", "b64_json"]] - """The format in which the generated images are returned. - - Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the - image has been generated. - """ - - size: Optional[Literal["256x256", "512x512", "1024x1024"]] - """The size of the generated images. - - Must be one of `256x256`, `512x512`, or `1024x1024`. - """ - - user: str - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ diff --git a/src/digitalocean_genai_sdk/types/images_response.py b/src/digitalocean_genai_sdk/types/images_response.py deleted file mode 100644 index 509e0069..00000000 --- a/src/digitalocean_genai_sdk/types/images_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["ImagesResponse", "Data"] - - -class Data(BaseModel): - b64_json: Optional[str] = None - """ - The base64-encoded JSON of the generated image, if `response_format` is - `b64_json`. - """ - - revised_prompt: Optional[str] = None - """ - The prompt that was used to generate the image, if there was any revision to the - prompt. - """ - - url: Optional[str] = None - """The URL of the generated image, if `response_format` is `url` (default).""" - - -class ImagesResponse(BaseModel): - created: int - - data: List[Data] diff --git a/src/digitalocean_genai_sdk/types/includable.py b/src/digitalocean_genai_sdk/types/includable.py deleted file mode 100644 index 8b4920a2..00000000 --- a/src/digitalocean_genai_sdk/types/includable.py +++ /dev/null @@ -1,9 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal, TypeAlias - -__all__ = ["Includable"] - -Includable: TypeAlias = Literal[ - "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url" -] diff --git a/src/digitalocean_genai_sdk/types/input_content.py b/src/digitalocean_genai_sdk/types/input_content.py deleted file mode 100644 index 04e37845..00000000 --- a/src/digitalocean_genai_sdk/types/input_content.py +++ /dev/null @@ -1,53 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = ["InputContent", "InputText", "InputImage", "InputFile"] - - -class InputText(BaseModel): - text: str - """The text input to the model.""" - - type: Literal["input_text"] - """The type of the input item. 
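# Illustrative sketch (not part of the patch): consuming an ImagesResponse.
# Each item carries either `url` (the default, valid for 60 minutes) or
# `b64_json`, depending on the requested `response_format`. Plain dict access
# stands in for the BaseModel attributes.
import base64

def save_images(images_response: dict) -> None:
    for i, item in enumerate(images_response["data"]):
        if item.get("b64_json"):
            with open(f"image_{i}.png", "wb") as f:
                f.write(base64.b64decode(item["b64_json"]))
        elif item.get("url"):
            print(f"image {i} is temporarily available at {item['url']}")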
Always `input_text`.""" - - -class InputImage(BaseModel): - detail: Literal["high", "low", "auto"] - """The detail level of the image to be sent to the model. - - One of `high`, `low`, or `auto`. Defaults to `auto`. - """ - - type: Literal["input_image"] - """The type of the input item. Always `input_image`.""" - - file_id: Optional[str] = None - """The ID of the file to be sent to the model.""" - - image_url: Optional[str] = None - """The URL of the image to be sent to the model. - - A fully qualified URL or base64 encoded image in a data URL. - """ - - -class InputFile(BaseModel): - type: Literal["input_file"] - """The type of the input item. Always `input_file`.""" - - file_data: Optional[str] = None - """The content of the file to be sent to the model.""" - - file_id: Optional[str] = None - """The ID of the file to be sent to the model.""" - - filename: Optional[str] = None - """The name of the file to be sent to the model.""" - - -InputContent: TypeAlias = Union[InputText, InputImage, InputFile] diff --git a/src/digitalocean_genai_sdk/types/input_content_param.py b/src/digitalocean_genai_sdk/types/input_content_param.py deleted file mode 100644 index ed0bdf62..00000000 --- a/src/digitalocean_genai_sdk/types/input_content_param.py +++ /dev/null @@ -1,53 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["InputContentParam", "InputText", "InputImage", "InputFile"] - - -class InputText(TypedDict, total=False): - text: Required[str] - """The text input to the model.""" - - type: Required[Literal["input_text"]] - """The type of the input item. Always `input_text`.""" - - -class InputImage(TypedDict, total=False): - detail: Required[Literal["high", "low", "auto"]] - """The detail level of the image to be sent to the model. - - One of `high`, `low`, or `auto`. Defaults to `auto`. - """ - - type: Required[Literal["input_image"]] - """The type of the input item. Always `input_image`.""" - - file_id: Optional[str] - """The ID of the file to be sent to the model.""" - - image_url: Optional[str] - """The URL of the image to be sent to the model. - - A fully qualified URL or base64 encoded image in a data URL. - """ - - -class InputFile(TypedDict, total=False): - type: Required[Literal["input_file"]] - """The type of the input item. Always `input_file`.""" - - file_data: str - """The content of the file to be sent to the model.""" - - file_id: str - """The ID of the file to be sent to the model.""" - - filename: str - """The name of the file to be sent to the model.""" - - -InputContentParam: TypeAlias = Union[InputText, InputImage, InputFile] diff --git a/src/digitalocean_genai_sdk/types/input_message.py b/src/digitalocean_genai_sdk/types/input_message.py deleted file mode 100644 index 4dc5526f..00000000 --- a/src/digitalocean_genai_sdk/types/input_message.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .input_content import InputContent - -__all__ = ["InputMessage"] - - -class InputMessage(BaseModel): - content: List[InputContent] - """ - A list of one or many input items to the model, containing different content - types. 
- """ - - role: Literal["user", "system", "developer"] - """The role of the message input. One of `user`, `system`, or `developer`.""" - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always set to `message`.""" diff --git a/src/digitalocean_genai_sdk/types/input_message_param.py b/src/digitalocean_genai_sdk/types/input_message_param.py deleted file mode 100644 index 388c54ca..00000000 --- a/src/digitalocean_genai_sdk/types/input_message_param.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Literal, Required, TypedDict - -from .input_content_param import InputContentParam - -__all__ = ["InputMessageParam"] - - -class InputMessageParam(TypedDict, total=False): - content: Required[Iterable[InputContentParam]] - """ - A list of one or many input items to the model, containing different content - types. - """ - - role: Required[Literal["user", "system", "developer"]] - """The role of the message input. One of `user`, `system`, or `developer`.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - type: Literal["message"] - """The type of the message input. Always set to `message`.""" diff --git a/src/digitalocean_genai_sdk/types/model_delete_response.py b/src/digitalocean_genai_sdk/types/model_delete_response.py deleted file mode 100644 index 63b2d296..00000000 --- a/src/digitalocean_genai_sdk/types/model_delete_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .._models import BaseModel - -__all__ = ["ModelDeleteResponse"] - - -class ModelDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: str diff --git a/src/digitalocean_genai_sdk/types/model_response_properties.py b/src/digitalocean_genai_sdk/types/model_response_properties.py deleted file mode 100644 index 547c6391..00000000 --- a/src/digitalocean_genai_sdk/types/model_response_properties.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional - -from .._models import BaseModel - -__all__ = ["ModelResponseProperties"] - - -class ModelResponseProperties(BaseModel): - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - temperature: Optional[float] = None - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. We generally recommend altering - this or `top_p` but not both.
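# Illustrative sketch (not part of the patch): an InputMessageParam-shaped
# message mixing the content variants above, one `input_text` part and one
# `input_image` part referenced by URL. The URL is a placeholder.
user_message = {
    "type": "message",
    "role": "user",
    "content": [
        {"type": "input_text", "text": "What is shown in this image?"},
        {
            "type": "input_image",
            "detail": "auto",
            "image_url": "https://example.com/photo.png",
        },
    ],
}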
- """ - - top_p: Optional[float] = None - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - """ - - user: Optional[str] = None - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_params.py b/src/digitalocean_genai_sdk/types/moderation_classify_params.py deleted file mode 100644 index bcc99a1e..00000000 --- a/src/digitalocean_genai_sdk/types/moderation_classify_params.py +++ /dev/null @@ -1,59 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "ModerationClassifyParams", - "InputUnionMember2", - "InputUnionMember2UnionMember0", - "InputUnionMember2UnionMember0ImageURL", - "InputUnionMember2UnionMember1", -] - - -class ModerationClassifyParams(TypedDict, total=False): - input: Required[Union[str, List[str], Iterable[InputUnionMember2]]] - """Input (or inputs) to classify. - - Can be a single string, an array of strings, or an array of multi-modal input - objects similar to other models. - """ - - model: Union[ - str, - Literal[ - "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" - ], - ] - """The content moderation model you would like to use. - - Learn more in [the moderation guide](/docs/guides/moderation), and learn about - available models [here](/docs/models#moderation). - """ - - -class InputUnionMember2UnionMember0ImageURL(TypedDict, total=False): - url: Required[str] - """Either a URL of the image or the base64 encoded image data.""" - - -class InputUnionMember2UnionMember0(TypedDict, total=False): - image_url: Required[InputUnionMember2UnionMember0ImageURL] - """Contains either an image URL or a data URL for a base64 encoded image.""" - - type: Required[Literal["image_url"]] - """Always `image_url`.""" - - -class InputUnionMember2UnionMember1(TypedDict, total=False): - text: Required[str] - """A string of text to classify.""" - - type: Required[Literal["text"]] - """Always `text`.""" - - -InputUnionMember2: TypeAlias = Union[InputUnionMember2UnionMember0, InputUnionMember2UnionMember1] diff --git a/src/digitalocean_genai_sdk/types/moderation_classify_response.py b/src/digitalocean_genai_sdk/types/moderation_classify_response.py deleted file mode 100644 index cfda7318..00000000 --- a/src/digitalocean_genai_sdk/types/moderation_classify_response.py +++ /dev/null @@ -1,203 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from .._models import BaseModel - -__all__ = [ - "ModerationClassifyResponse", - "Result", - "ResultCategories", - "ResultCategoryAppliedInputTypes", - "ResultCategoryScores", -] - - -class ResultCategories(BaseModel): - harassment: bool - """ - Content that expresses, incites, or promotes harassing language towards any - target. 
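# Illustrative sketch (not part of the patch): a multi-modal
# ModerationClassifyParams payload. `input` may be a single string, a list of
# strings, or typed parts as below; the text and URL are placeholders.
moderation_params = {
    "model": "omni-moderation-latest",
    "input": [
        {"type": "text", "text": "some user-submitted text"},
        {"type": "image_url", "image_url": {"url": "https://example.com/upload.png"}},
    ],
}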
- """ - - harassment_threatening: bool = FieldInfo(alias="harassment/threatening") - """ - Harassment content that also includes violence or serious harm towards any - target. - """ - - hate: bool - """ - Content that expresses, incites, or promotes hate based on race, gender, - ethnicity, religion, nationality, sexual orientation, disability status, or - caste. Hateful content aimed at non-protected groups (e.g., chess players) is - harassment. - """ - - hate_threatening: bool = FieldInfo(alias="hate/threatening") - """ - Hateful content that also includes violence or serious harm towards the targeted - group based on race, gender, ethnicity, religion, nationality, sexual - orientation, disability status, or caste. - """ - - illicit: Optional[bool] = None - """ - Content that includes instructions or advice that facilitate the planning or - execution of wrongdoing, or that gives advice or instruction on how to commit - illicit acts. For example, "how to shoplift" would fit this category. - """ - - illicit_violent: Optional[bool] = FieldInfo(alias="illicit/violent", default=None) - """ - Content that includes instructions or advice that facilitate the planning or - execution of wrongdoing that also includes violence, or that gives advice or - instruction on the procurement of any weapon. - """ - - self_harm: bool = FieldInfo(alias="self-harm") - """ - Content that promotes, encourages, or depicts acts of self-harm, such as - suicide, cutting, and eating disorders. - """ - - self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions") - """ - Content that encourages performing acts of self-harm, such as suicide, cutting, - and eating disorders, or that gives instructions or advice on how to commit such - acts. - """ - - self_harm_intent: bool = FieldInfo(alias="self-harm/intent") - """ - Content where the speaker expresses that they are engaging or intend to engage - in acts of self-harm, such as suicide, cutting, and eating disorders. - """ - - sexual: bool - """ - Content meant to arouse sexual excitement, such as the description of sexual - activity, or that promotes sexual services (excluding sex education and - wellness). 
- """ - - sexual_minors: bool = FieldInfo(alias="sexual/minors") - """Sexual content that includes an individual who is under 18 years old.""" - - violence: bool - """Content that depicts death, violence, or physical injury.""" - - violence_graphic: bool = FieldInfo(alias="violence/graphic") - """Content that depicts death, violence, or physical injury in graphic detail.""" - - -class ResultCategoryAppliedInputTypes(BaseModel): - harassment: List[Literal["text"]] - """The applied input type(s) for the category 'harassment'.""" - - harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening") - """The applied input type(s) for the category 'harassment/threatening'.""" - - hate: List[Literal["text"]] - """The applied input type(s) for the category 'hate'.""" - - hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening") - """The applied input type(s) for the category 'hate/threatening'.""" - - illicit: List[Literal["text"]] - """The applied input type(s) for the category 'illicit'.""" - - illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent") - """The applied input type(s) for the category 'illicit/violent'.""" - - self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm") - """The applied input type(s) for the category 'self-harm'.""" - - self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions") - """The applied input type(s) for the category 'self-harm/instructions'.""" - - self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent") - """The applied input type(s) for the category 'self-harm/intent'.""" - - sexual: List[Literal["text", "image"]] - """The applied input type(s) for the category 'sexual'.""" - - sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors") - """The applied input type(s) for the category 'sexual/minors'.""" - - violence: List[Literal["text", "image"]] - """The applied input type(s) for the category 'violence'.""" - - violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic") - """The applied input type(s) for the category 'violence/graphic'.""" - - -class ResultCategoryScores(BaseModel): - harassment: float - """The score for the category 'harassment'.""" - - harassment_threatening: float = FieldInfo(alias="harassment/threatening") - """The score for the category 'harassment/threatening'.""" - - hate: float - """The score for the category 'hate'.""" - - hate_threatening: float = FieldInfo(alias="hate/threatening") - """The score for the category 'hate/threatening'.""" - - illicit: float - """The score for the category 'illicit'.""" - - illicit_violent: float = FieldInfo(alias="illicit/violent") - """The score for the category 'illicit/violent'.""" - - self_harm: float = FieldInfo(alias="self-harm") - """The score for the category 'self-harm'.""" - - self_harm_instructions: float = FieldInfo(alias="self-harm/instructions") - """The score for the category 'self-harm/instructions'.""" - - self_harm_intent: float = FieldInfo(alias="self-harm/intent") - """The score for the category 'self-harm/intent'.""" - - sexual: float - """The score for the category 'sexual'.""" - - sexual_minors: float = FieldInfo(alias="sexual/minors") - """The score for the category 'sexual/minors'.""" - - violence: float - """The score for the category 'violence'.""" - - violence_graphic: float = FieldInfo(alias="violence/graphic") - """The score for the category 'violence/graphic'.""" - - -class 
Result(BaseModel): - categories: ResultCategories - """A list of the categories, and whether they are flagged or not.""" - - category_applied_input_types: ResultCategoryAppliedInputTypes - """ - A list of the categories along with the input type(s) that the score applies to. - """ - - category_scores: ResultCategoryScores - """A list of the categories along with their scores as predicted by the model.""" - - flagged: bool - """Whether any of the below categories are flagged.""" - - -class ModerationClassifyResponse(BaseModel): - id: str - """The unique identifier for the moderation request.""" - - model: str - """The model used to generate the moderation results.""" - - results: List[Result] - """A list of moderation objects.""" diff --git a/src/digitalocean_genai_sdk/types/openai_file.py b/src/digitalocean_genai_sdk/types/openai_file.py deleted file mode 100644 index a8398a35..00000000 --- a/src/digitalocean_genai_sdk/types/openai_file.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["OpenAIFile"] - - -class OpenAIFile(BaseModel): - id: str - """The file identifier, which can be referenced in the API endpoints.""" - - bytes: int - """The size of the file, in bytes.""" - - created_at: int - """The Unix timestamp (in seconds) for when the file was created.""" - - filename: str - """The name of the file.""" - - object: Literal["file"] - """The object type, which is always `file`.""" - - purpose: Literal[ - "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision" - ] - """The intended purpose of the file. - - Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, - `fine-tune`, `fine-tune-results` and `vision`. - """ - - status: Literal["uploaded", "processed", "error"] - """Deprecated. - - The current status of the file, which can be either `uploaded`, `processed`, or - `error`. - """ - - expires_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the file will expire.""" - - status_details: Optional[str] = None - """Deprecated. - - For details on why a fine-tuning training file failed validation, see the - `error` field on `fine_tuning.job`. - """ diff --git a/src/digitalocean_genai_sdk/types/organization/__init__.py b/src/digitalocean_genai_sdk/types/organization/__init__.py index 5b34f495..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/organization/__init__.py +++ b/src/digitalocean_genai_sdk/types/organization/__init__.py @@ -1,34 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
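# Illustrative sketch (not part of the patch): reading a Result from a
# ModerationClassifyResponse. Each result pairs per-category booleans with
# scores and the input types each score covers; plain dict access stands in
# for the BaseModel attributes.
from typing import List

def flagged_categories(result: dict) -> List[str]:
    if not result["flagged"]:
        return []
    return [name for name, hit in result["categories"].items() if hit]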
from __future__ import annotations - -from .invite import Invite as Invite -from .project import Project as Project -from .admin_api_key import AdminAPIKey as AdminAPIKey -from .user_list_params import UserListParams as UserListParams -from .organization_user import OrganizationUser as OrganizationUser -from .invite_list_params import InviteListParams as InviteListParams -from .user_list_response import UserListResponse as UserListResponse -from .user_update_params import UserUpdateParams as UserUpdateParams -from .project_list_params import ProjectListParams as ProjectListParams -from .usage_images_params import UsageImagesParams as UsageImagesParams -from .invite_create_params import InviteCreateParams as InviteCreateParams -from .invite_list_response import InviteListResponse as InviteListResponse -from .user_delete_response import UserDeleteResponse as UserDeleteResponse -from .project_create_params import ProjectCreateParams as ProjectCreateParams -from .project_list_response import ProjectListResponse as ProjectListResponse -from .project_update_params import ProjectUpdateParams as ProjectUpdateParams -from .invite_delete_response import InviteDeleteResponse as InviteDeleteResponse -from .usage_embeddings_params import UsageEmbeddingsParams as UsageEmbeddingsParams -from .usage_completions_params import UsageCompletionsParams as UsageCompletionsParams -from .usage_moderations_params import UsageModerationsParams as UsageModerationsParams -from .admin_api_key_list_params import AdminAPIKeyListParams as AdminAPIKeyListParams -from .usage_vector_stores_params import UsageVectorStoresParams as UsageVectorStoresParams -from .admin_api_key_create_params import AdminAPIKeyCreateParams as AdminAPIKeyCreateParams -from .admin_api_key_list_response import AdminAPIKeyListResponse as AdminAPIKeyListResponse -from .usage_audio_speeches_params import UsageAudioSpeechesParams as UsageAudioSpeechesParams -from .admin_api_key_delete_response import AdminAPIKeyDeleteResponse as AdminAPIKeyDeleteResponse -from .usage_audio_transcriptions_params import UsageAudioTranscriptionsParams as UsageAudioTranscriptionsParams -from .usage_code_interpreter_sessions_params import ( - UsageCodeInterpreterSessionsParams as UsageCodeInterpreterSessionsParams, -) diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key.py deleted file mode 100644 index 8a57458f..00000000 --- a/src/digitalocean_genai_sdk/types/organization/admin_api_key.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["AdminAPIKey", "Owner"] - - -class Owner(BaseModel): - id: Optional[str] = None - - created_at: Optional[int] = None - - name: Optional[str] = None - - role: Optional[str] = None - - type: Optional[str] = None - - -class AdminAPIKey(BaseModel): - id: Optional[str] = None - - created_at: Optional[int] = None - - name: Optional[str] = None - - object: Optional[str] = None - - owner: Optional[Owner] = None - - redacted_value: Optional[str] = None - - value: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py deleted file mode 100644 index dccdfb8a..00000000 --- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_create_params.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["AdminAPIKeyCreateParams"] - - -class AdminAPIKeyCreateParams(TypedDict, total=False): - name: Required[str] diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py deleted file mode 100644 index b752558c..00000000 --- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["AdminAPIKeyDeleteResponse"] - - -class AdminAPIKeyDeleteResponse(BaseModel): - id: Optional[str] = None - - deleted: Optional[bool] = None - - object: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py deleted file mode 100644 index c3b3f510..00000000 --- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_params.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, TypedDict - -__all__ = ["AdminAPIKeyListParams"] - - -class AdminAPIKeyListParams(TypedDict, total=False): - after: Optional[str] - """Return keys with IDs that come after this ID in the pagination order.""" - - limit: int - """Maximum number of keys to return.""" - - order: Literal["asc", "desc"] - """Order results by creation time, ascending or descending.""" diff --git a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py deleted file mode 100644 index 8ef9beb7..00000000 --- a/src/digitalocean_genai_sdk/types/organization/admin_api_key_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from .admin_api_key import AdminAPIKey - -__all__ = ["AdminAPIKeyListResponse"] - - -class AdminAPIKeyListResponse(BaseModel): - data: Optional[List[AdminAPIKey]] = None - - first_id: Optional[str] = None - - has_more: Optional[bool] = None - - last_id: Optional[str] = None - - object: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/organization/invite.py b/src/digitalocean_genai_sdk/types/organization/invite.py deleted file mode 100644 index fd495caf..00000000 --- a/src/digitalocean_genai_sdk/types/organization/invite.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["Invite", "Project"] - - -class Project(BaseModel): - id: Optional[str] = None - """Project's public ID""" - - role: Optional[Literal["member", "owner"]] = None - """Project membership role""" - - -class Invite(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - email: str - """The email address of the individual to whom the invite was sent""" - - expires_at: int - """The Unix timestamp (in seconds) of when the invite expires.""" - - invited_at: int - """The Unix timestamp (in seconds) of when the invite was sent.""" - - object: Literal["organization.invite"] - """The object type, which is always `organization.invite`""" - - role: Literal["owner", "reader"] - """`owner` or `reader`""" - - status: Literal["accepted", "expired", "pending"] - """`accepted`, `expired`, or `pending`""" - - accepted_at: Optional[int] = None - """The Unix timestamp (in seconds) of when the invite was accepted.""" - - projects: Optional[List[Project]] = None - """The projects that were granted membership upon acceptance of the invite.""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py b/src/digitalocean_genai_sdk/types/organization/invite_create_params.py deleted file mode 100644 index 7709003f..00000000 --- a/src/digitalocean_genai_sdk/types/organization/invite_create_params.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["InviteCreateParams", "Project"] - - -class InviteCreateParams(TypedDict, total=False): - email: Required[str] - """Send an email to this address""" - - role: Required[Literal["reader", "owner"]] - """`owner` or `reader`""" - - projects: Iterable[Project] - """ - An array of projects to which membership is granted at the same time the org - invite is accepted. If omitted, the user will be invited to the default project - for compatibility with legacy behavior. - """ - - -class Project(TypedDict, total=False): - id: Required[str] - """Project's public ID""" - - role: Required[Literal["member", "owner"]] - """Project membership role""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py b/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py deleted file mode 100644 index 52bd47b9..00000000 --- a/src/digitalocean_genai_sdk/types/organization/invite_delete_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
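# Illustrative sketch (not part of the patch): an InviteCreateParams-shaped
# payload. The `projects` array grants project membership when the invite is
# accepted; omitting it falls back to the default project, per the docstring
# above. The email address and project ID are placeholders.
invite_params = {
    "email": "new.teammate@example.com",
    "role": "reader",
    "projects": [{"id": "proj_abc123", "role": "member"}],
}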
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["InviteDeleteResponse"] - - -class InviteDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["organization.invite.deleted"] - """The object type, which is always `organization.invite.deleted`""" diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py b/src/digitalocean_genai_sdk/types/organization/invite_list_params.py deleted file mode 100644 index 678510d6..00000000 --- a/src/digitalocean_genai_sdk/types/organization/invite_list_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["InviteListParams"] - - -class InviteListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ diff --git a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py b/src/digitalocean_genai_sdk/types/organization/invite_list_response.py deleted file mode 100644 index 2b646289..00000000 --- a/src/digitalocean_genai_sdk/types/organization/invite_list_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .invite import Invite -from ..._models import BaseModel - -__all__ = ["InviteListResponse"] - - -class InviteListResponse(BaseModel): - data: List[Invite] - - object: Literal["list"] - """The object type, which is always `list`""" - - first_id: Optional[str] = None - """The first `invite_id` in the retrieved `list`""" - - has_more: Optional[bool] = None - """ - The `has_more` property is used for pagination to indicate there are additional - results. - """ - - last_id: Optional[str] = None - """The last `invite_id` in the retrieved `list`""" diff --git a/src/digitalocean_genai_sdk/types/organization/organization_user.py b/src/digitalocean_genai_sdk/types/organization/organization_user.py deleted file mode 100644 index 890833f1..00000000 --- a/src/digitalocean_genai_sdk/types/organization/organization_user.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["OrganizationUser"] - - -class OrganizationUser(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - added_at: int - """The Unix timestamp (in seconds) of when the user was added.""" - - email: str - """The email address of the user""" - - name: str - """The name of the user""" - - object: Literal["organization.user"] - """The object type, which is always `organization.user`""" - - role: Literal["owner", "reader"] - """`owner` or `reader`""" diff --git a/src/digitalocean_genai_sdk/types/organization/project.py b/src/digitalocean_genai_sdk/types/organization/project.py deleted file mode 100644 index 731e8609..00000000 --- a/src/digitalocean_genai_sdk/types/organization/project.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["Project"] - - -class Project(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - created_at: int - """The Unix timestamp (in seconds) of when the project was created.""" - - name: str - """The name of the project. This appears in reporting.""" - - object: Literal["organization.project"] - """The object type, which is always `organization.project`""" - - status: Literal["active", "archived"] - """`active` or `archived`""" - - archived_at: Optional[int] = None - """The Unix timestamp (in seconds) of when the project was archived or `null`.""" diff --git a/src/digitalocean_genai_sdk/types/organization/project_create_params.py b/src/digitalocean_genai_sdk/types/organization/project_create_params.py deleted file mode 100644 index 0c18bc5b..00000000 --- a/src/digitalocean_genai_sdk/types/organization/project_create_params.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["ProjectCreateParams"] - - -class ProjectCreateParams(TypedDict, total=False): - name: Required[str] - """The friendly name of the project, this name appears in reports.""" diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_params.py b/src/digitalocean_genai_sdk/types/organization/project_list_params.py deleted file mode 100644 index f55fb8a3..00000000 --- a/src/digitalocean_genai_sdk/types/organization/project_list_params.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["ProjectListParams"] - - -class ProjectListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - include_archived: bool - """If `true` returns all projects including those that have been `archived`. - - Archived projects are not included by default. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. 
- """ diff --git a/src/digitalocean_genai_sdk/types/organization/project_list_response.py b/src/digitalocean_genai_sdk/types/organization/project_list_response.py deleted file mode 100644 index 24a79f63..00000000 --- a/src/digitalocean_genai_sdk/types/organization/project_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from .project import Project -from ..._models import BaseModel - -__all__ = ["ProjectListResponse"] - - -class ProjectListResponse(BaseModel): - data: List[Project] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/project_update_params.py b/src/digitalocean_genai_sdk/types/organization/project_update_params.py deleted file mode 100644 index 0ba1984a..00000000 --- a/src/digitalocean_genai_sdk/types/organization/project_update_params.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["ProjectUpdateParams"] - - -class ProjectUpdateParams(TypedDict, total=False): - name: Required[str] - """The updated name of the project, this name appears in reports.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py index 4b0e0f9b..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py +++ b/src/digitalocean_genai_sdk/types/organization/projects/__init__.py @@ -1,24 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations - -from .api_key import APIKey as APIKey -from .rate_limit import RateLimit as RateLimit -from .project_user import ProjectUser as ProjectUser -from .service_account import ServiceAccount as ServiceAccount -from .user_add_params import UserAddParams as UserAddParams -from .user_list_params import UserListParams as UserListParams -from .user_list_response import UserListResponse as UserListResponse -from .user_update_params import UserUpdateParams as UserUpdateParams -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .user_delete_response import UserDeleteResponse as UserDeleteResponse -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .rate_limit_list_params import RateLimitListParams as RateLimitListParams -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .rate_limit_list_response import RateLimitListResponse as RateLimitListResponse -from .rate_limit_update_params import RateLimitUpdateParams as RateLimitUpdateParams -from .service_account_list_params import ServiceAccountListParams as ServiceAccountListParams -from .service_account_create_params import ServiceAccountCreateParams as ServiceAccountCreateParams -from .service_account_list_response import ServiceAccountListResponse as ServiceAccountListResponse -from .service_account_create_response import ServiceAccountCreateResponse as ServiceAccountCreateResponse -from .service_account_delete_response import ServiceAccountDeleteResponse as ServiceAccountDeleteResponse diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key.py deleted file mode 100644 index 276f6d9b..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/api_key.py +++ /dev/null @@ -1,40 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ...._models import BaseModel -from .project_user import ProjectUser -from .service_account import ServiceAccount - -__all__ = ["APIKey", "Owner"] - - -class Owner(BaseModel): - service_account: Optional[ServiceAccount] = None - """Represents an individual service account in a project.""" - - type: Optional[Literal["user", "service_account"]] = None - """`user` or `service_account`""" - - user: Optional[ProjectUser] = None - """Represents an individual user in a project.""" - - -class APIKey(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - created_at: int - """The Unix timestamp (in seconds) of when the API key was created""" - - name: str - """The name of the API key""" - - object: Literal["organization.project.api_key"] - """The object type, which is always `organization.project.api_key`""" - - owner: Owner - - redacted_value: str - """The redacted value of the API key""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py deleted file mode 100644 index c3ec64bd..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["APIKeyDeleteResponse"] - - -class APIKeyDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["organization.project.api_key.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py deleted file mode 100644 index 422a2851..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py deleted file mode 100644 index 669de6c6..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/api_key_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from .api_key import APIKey -from ...._models import BaseModel - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - data: List[APIKey] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py b/src/digitalocean_genai_sdk/types/organization/projects/project_user.py deleted file mode 100644 index afcdb514..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/project_user.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ProjectUser"] - - -class ProjectUser(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - added_at: int - """The Unix timestamp (in seconds) of when the user was added.""" - - email: str - """The email address of the user""" - - name: str - """The name of the user""" - - object: Literal["organization.project.user"] - """The object type, which is always `organization.project.user`""" - - role: Literal["owner", "member"] - """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py deleted file mode 100644 index 1a9795f5..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit.py +++ /dev/null @@ -1,37 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from typing import Optional -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["RateLimit"] - - -class RateLimit(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - max_requests_per_1_minute: int - """The maximum requests per minute.""" - - max_tokens_per_1_minute: int - """The maximum tokens per minute.""" - - model: str - """The model this rate limit applies to.""" - - object: Literal["project.rate_limit"] - """The object type, which is always `project.rate_limit`""" - - batch_1_day_max_input_tokens: Optional[int] = None - """The maximum batch input tokens per day. Only present for relevant models.""" - - max_audio_megabytes_per_1_minute: Optional[int] = None - """The maximum audio megabytes per minute. Only present for relevant models.""" - - max_images_per_1_minute: Optional[int] = None - """The maximum images per minute. Only present for relevant models.""" - - max_requests_per_1_day: Optional[int] = None - """The maximum requests per day. Only present for relevant models.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py deleted file mode 100644 index aa007e5f..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_params.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["RateLimitListParams"] - - -class RateLimitListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, beginning with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - limit: int - """A limit on the number of objects to be returned. The default is 100.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py deleted file mode 100644 index f2133f3e..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List -from typing_extensions import Literal - -from ...._models import BaseModel -from .rate_limit import RateLimit - -__all__ = ["RateLimitListResponse"] - - -class RateLimitListResponse(BaseModel): - data: List[RateLimit] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py deleted file mode 100644 index a303d6f4..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/rate_limit_update_params.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["RateLimitUpdateParams"] - - -class RateLimitUpdateParams(TypedDict, total=False): - project_id: Required[str] - - batch_1_day_max_input_tokens: int - """The maximum batch input tokens per day. Only relevant for certain models.""" - - max_audio_megabytes_per_1_minute: int - """The maximum audio megabytes per minute. Only relevant for certain models.""" - - max_images_per_1_minute: int - """The maximum images per minute. Only relevant for certain models.""" - - max_requests_per_1_day: int - """The maximum requests per day. Only relevant for certain models.""" - - max_requests_per_1_minute: int - """The maximum requests per minute.""" - - max_tokens_per_1_minute: int - """The maximum tokens per minute.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account.py deleted file mode 100644 index 9200ba11..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ServiceAccount"] - - -class ServiceAccount(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints""" - - created_at: int - """The Unix timestamp (in seconds) of when the service account was created""" - - name: str - """The name of the service account""" - - object: Literal["organization.project.service_account"] - """The object type, which is always `organization.project.service_account`""" - - role: Literal["owner", "member"] - """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py deleted file mode 100644 index 409dcba5..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_params.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
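# RateLimitUpdateParams is a TypedDict with total=False, so an update payload
# is a plain dict: only `project_id` is required, and any limit left out is
# simply not sent. A sketch of a partial update (identifiers and numbers are
# illustrative, not recommended values):
from digitalocean_genai_sdk.types.organization.projects import RateLimitUpdateParams

update: RateLimitUpdateParams = {
    "project_id": "proj_example",        # hypothetical project ID
    "max_requests_per_1_minute": 500,
    "max_tokens_per_1_minute": 150_000,  # other limits remain unchanged
}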
- -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["ServiceAccountCreateParams"] - - -class ServiceAccountCreateParams(TypedDict, total=False): - name: Required[str] - """The name of the service account being created.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py deleted file mode 100644 index e7757a8a..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_create_response.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ServiceAccountCreateResponse", "APIKey"] - - -class APIKey(BaseModel): - id: str - - created_at: int - - name: str - - object: Literal["organization.project.service_account.api_key"] - """The object type, which is always `organization.project.service_account.api_key`""" - - value: str - - -class ServiceAccountCreateResponse(BaseModel): - id: str - - api_key: APIKey - - created_at: int - - name: str - - object: Literal["organization.project.service_account"] - - role: Literal["member"] - """Service accounts can only have one role of type `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py deleted file mode 100644 index 28d04e10..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["ServiceAccountDeleteResponse"] - - -class ServiceAccountDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["organization.project.service_account.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py deleted file mode 100644 index 7f808e28..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["ServiceAccountListParams"] - - -class ServiceAccountListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. 
- """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py deleted file mode 100644 index 0818c8c8..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/service_account_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from ...._models import BaseModel -from .service_account import ServiceAccount - -__all__ = ["ServiceAccountListResponse"] - - -class ServiceAccountListResponse(BaseModel): - data: List[ServiceAccount] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py deleted file mode 100644 index 85f38c0c..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/user_add_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UserAddParams"] - - -class UserAddParams(TypedDict, total=False): - role: Required[Literal["owner", "member"]] - """`owner` or `member`""" - - user_id: Required[str] - """The ID of the user.""" diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py deleted file mode 100644 index 7ac68cc5..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/user_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ...._models import BaseModel - -__all__ = ["UserDeleteResponse"] - - -class UserDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["organization.project.user.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py deleted file mode 100644 index d561e907..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["UserListParams"] - - -class UserListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. 
- """ diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py deleted file mode 100644 index 1f8993ad..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/user_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ...._models import BaseModel -from .project_user import ProjectUser - -__all__ = ["UserListResponse"] - - -class UserListResponse(BaseModel): - data: List[ProjectUser] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py deleted file mode 100644 index 08b3e1a4..00000000 --- a/src/digitalocean_genai_sdk/types/organization/projects/user_update_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UserUpdateParams"] - - -class UserUpdateParams(TypedDict, total=False): - project_id: Required[str] - - role: Required[Literal["owner", "member"]] - """`owner` or `member`""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py deleted file mode 100644 index 819ffc37..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_audio_speeches_params.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageAudioSpeechesParams"] - - -class UsageAudioSpeechesParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any - combination of them. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. 
- """ - - project_ids: List[str] - """Return only usage for these projects.""" - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py deleted file mode 100644 index 318f85a3..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_audio_transcriptions_params.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageAudioTranscriptionsParams"] - - -class UsageAudioTranscriptionsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any - combination of them. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py deleted file mode 100644 index 24322abe..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_code_interpreter_sessions_params.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageCodeInterpreterSessionsParams"] - - -class UsageCodeInterpreterSessionsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. 
- """ - - project_ids: List[str] - """Return only usage for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py b/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py deleted file mode 100644 index 8bd94d39..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_completions_params.py +++ /dev/null @@ -1,61 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageCompletionsParams"] - - -class UsageCompletionsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - batch: bool - """If `true`, return batch jobs only. - - If `false`, return non-batch jobs only. By default, return both. - """ - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "batch"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch` - or any combination of them. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py b/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py deleted file mode 100644 index c4a71264..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_embeddings_params.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageEmbeddingsParams"] - - -class UsageEmbeddingsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any - combination of them. - """ - - limit: int - """Specifies the number of buckets to return. 
- - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py b/src/digitalocean_genai_sdk/types/organization/usage_images_params.py deleted file mode 100644 index 31f2a31f..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_images_params.py +++ /dev/null @@ -1,69 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageImagesParams"] - - -class UsageImagesParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model", "size", "source"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`, - `source` or any combination of them. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" - - sizes: List[Literal["256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"]] - """Return only usages for these image sizes. - - Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792` - or any combination of them. - """ - - sources: List[Literal["image.generation", "image.edit", "image.variation"]] - """Return only usages for these sources. - - Possible values are `image.generation`, `image.edit`, `image.variation` or any - combination of them. - """ - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py b/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py deleted file mode 100644 index 438fca8f..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_moderations_params.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
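# The Usage*Params TypedDicts share one query shape: a required `start_time`
# (Unix seconds, inclusive), an optional exclusive `end_time`, a
# `bucket_width`, and endpoint-specific `group_by` dimensions. A hedged sketch
# of a one-week, daily-bucketed image-usage query (values are illustrative):
import time

from digitalocean_genai_sdk.types.organization.usage_images_params import UsageImagesParams

query: UsageImagesParams = {
    "start_time": int(time.time()) - 7 * 24 * 3600,  # one week ago
    "bucket_width": "1d",            # up to 31 daily buckets; default 7
    "group_by": ["model", "size"],
    "sizes": ["1024x1024"],          # restrict to one image size
}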
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageModerationsParams"] - - -class UsageModerationsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - api_key_ids: List[str] - """Return only usage for these API keys.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "user_id", "api_key_id", "model"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any - combination of them. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - models: List[str] - """Return only usage for these models.""" - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" - - user_ids: List[str] - """Return only usage for these users.""" diff --git a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py b/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py deleted file mode 100644 index dc25f126..00000000 --- a/src/digitalocean_genai_sdk/types/organization/usage_vector_stores_params.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UsageVectorStoresParams"] - - -class UsageVectorStoresParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - bucket_width: Literal["1m", "1h", "1d"] - """Width of each time bucket in response. - - Currently `1m`, `1h` and `1d` are supported, default to `1d`. - """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id"]] - """Group the usage data by the specified fields. - - Support fields include `project_id`. - """ - - limit: int - """Specifies the number of buckets to return. - - - `bucket_width=1d`: default: 7, max: 31 - - `bucket_width=1h`: default: 24, max: 168 - - `bucket_width=1m`: default: 60, max: 1440 - """ - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only usage for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py b/src/digitalocean_genai_sdk/types/organization/user_delete_response.py deleted file mode 100644 index 5baab3bf..00000000 --- a/src/digitalocean_genai_sdk/types/organization/user_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
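# Unlike the `after`/`before` object-ID cursors used elsewhere, the usage and
# costs endpoints paginate with an opaque `page` cursor taken from the previous
# response's `next_page` field. A sketch of draining such an endpoint; `fetch`
# is a hypothetical stand-in for any usage call, and the `next_page` attribute
# on the response is an assumption inferred from the docstrings:
def iter_usage_pages(fetch):
    page = None
    while True:
        resp = fetch(page=page)
        yield resp
        page = getattr(resp, "next_page", None)
        if not page:  # no cursor means the last page was reached
            return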
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["UserDeleteResponse"] - - -class UserDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["organization.user.deleted"] diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_params.py b/src/digitalocean_genai_sdk/types/organization/user_list_params.py deleted file mode 100644 index c7ad6c74..00000000 --- a/src/digitalocean_genai_sdk/types/organization/user_list_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -__all__ = ["UserListParams"] - - -class UserListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - emails: List[str] - """Filter by the email address of users.""" - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ diff --git a/src/digitalocean_genai_sdk/types/organization/user_list_response.py b/src/digitalocean_genai_sdk/types/organization/user_list_response.py deleted file mode 100644 index 73aaf45b..00000000 --- a/src/digitalocean_genai_sdk/types/organization/user_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from ..._models import BaseModel -from .organization_user import OrganizationUser - -__all__ = ["UserListResponse"] - - -class UserListResponse(BaseModel): - data: List[OrganizationUser] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/organization/user_update_params.py b/src/digitalocean_genai_sdk/types/organization/user_update_params.py deleted file mode 100644 index bc276120..00000000 --- a/src/digitalocean_genai_sdk/types/organization/user_update_params.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["UserUpdateParams"] - - -class UserUpdateParams(TypedDict, total=False): - role: Required[Literal["owner", "reader"]] - """`owner` or `reader`""" diff --git a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py b/src/digitalocean_genai_sdk/types/organization_get_costs_params.py deleted file mode 100644 index e114aa0f..00000000 --- a/src/digitalocean_genai_sdk/types/organization_get_costs_params.py +++ /dev/null @@ -1,43 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["OrganizationGetCostsParams"] - - -class OrganizationGetCostsParams(TypedDict, total=False): - start_time: Required[int] - """Start time (Unix seconds) of the query time range, inclusive.""" - - bucket_width: Literal["1d"] - """Width of each time bucket in response. - - Currently only `1d` is supported, default to `1d`. 
- """ - - end_time: int - """End time (Unix seconds) of the query time range, exclusive.""" - - group_by: List[Literal["project_id", "line_item"]] - """Group the costs by the specified fields. - - Support fields include `project_id`, `line_item` and any combination of them. - """ - - limit: int - """A limit on the number of buckets to be returned. - - Limit can range between 1 and 180, and the default is 7. - """ - - page: str - """A cursor for use in pagination. - - Corresponding to the `next_page` field from the previous response. - """ - - project_ids: List[str] - """Return only costs for these projects.""" diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py deleted file mode 100644 index 36b79e57..00000000 --- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_params.py +++ /dev/null @@ -1,87 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -from .audit_log_event_type import AuditLogEventType - -__all__ = ["OrganizationListAuditLogsParams", "EffectiveAt"] - - -class OrganizationListAuditLogsParams(TypedDict, total=False): - actor_emails: List[str] - """Return only events performed by users with these emails.""" - - actor_ids: List[str] - """Return only events performed by these actors. - - Can be a user ID, a service account ID, or an api key tracking ID. - """ - - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - effective_at: EffectiveAt - """Return only events whose `effective_at` (Unix seconds) is in this range.""" - - event_types: List[AuditLogEventType] - """Return only events with a `type` in one of these values. - - For example, `project.created`. For all options, see the documentation for the - [audit log object](/docs/api-reference/audit-logs/object). - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - project_ids: List[str] - """Return only events for these projects.""" - - resource_ids: List[str] - """Return only events performed on these targets. - - For example, a project ID updated. - """ - - -class EffectiveAt(TypedDict, total=False): - gt: int - """ - Return only events whose `effective_at` (Unix seconds) is greater than this - value. - """ - - gte: int - """ - Return only events whose `effective_at` (Unix seconds) is greater than or equal - to this value. - """ - - lt: int - """Return only events whose `effective_at` (Unix seconds) is less than this value.""" - - lte: int - """ - Return only events whose `effective_at` (Unix seconds) is less than or equal to - this value. 
- """ diff --git a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py b/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py deleted file mode 100644 index 751ec527..00000000 --- a/src/digitalocean_genai_sdk/types/organization_list_audit_logs_response.py +++ /dev/null @@ -1,433 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from .._models import BaseModel -from .audit_log_actor_user import AuditLogActorUser -from .audit_log_event_type import AuditLogEventType - -__all__ = [ - "OrganizationListAuditLogsResponse", - "Data", - "DataActor", - "DataActorAPIKey", - "DataActorAPIKeyServiceAccount", - "DataActorSession", - "DataAPIKeyCreated", - "DataAPIKeyCreatedData", - "DataAPIKeyDeleted", - "DataAPIKeyUpdated", - "DataAPIKeyUpdatedChangesRequested", - "DataInviteAccepted", - "DataInviteDeleted", - "DataInviteSent", - "DataInviteSentData", - "DataLoginFailed", - "DataLogoutFailed", - "DataOrganizationUpdated", - "DataOrganizationUpdatedChangesRequested", - "DataOrganizationUpdatedChangesRequestedSettings", - "DataProject", - "DataProjectArchived", - "DataProjectCreated", - "DataProjectCreatedData", - "DataProjectUpdated", - "DataProjectUpdatedChangesRequested", - "DataRateLimitDeleted", - "DataRateLimitUpdated", - "DataRateLimitUpdatedChangesRequested", - "DataServiceAccountCreated", - "DataServiceAccountCreatedData", - "DataServiceAccountDeleted", - "DataServiceAccountUpdated", - "DataServiceAccountUpdatedChangesRequested", - "DataUserAdded", - "DataUserAddedData", - "DataUserDeleted", - "DataUserUpdated", - "DataUserUpdatedChangesRequested", -] - - -class DataActorAPIKeyServiceAccount(BaseModel): - id: Optional[str] = None - """The service account id.""" - - -class DataActorAPIKey(BaseModel): - id: Optional[str] = None - """The tracking id of the API key.""" - - service_account: Optional[DataActorAPIKeyServiceAccount] = None - """The service account that performed the audit logged action.""" - - type: Optional[Literal["user", "service_account"]] = None - """The type of API key. Can be either `user` or `service_account`.""" - - user: Optional[AuditLogActorUser] = None - """The user who performed the audit logged action.""" - - -class DataActorSession(BaseModel): - ip_address: Optional[str] = None - """The IP address from which the action was performed.""" - - user: Optional[AuditLogActorUser] = None - """The user who performed the audit logged action.""" - - -class DataActor(BaseModel): - api_key: Optional[DataActorAPIKey] = None - """The API Key used to perform the audit logged action.""" - - session: Optional[DataActorSession] = None - """The session in which the audit logged action was performed.""" - - type: Optional[Literal["session", "api_key"]] = None - """The type of actor. Is either `session` or `api_key`.""" - - -class DataAPIKeyCreatedData(BaseModel): - scopes: Optional[List[str]] = None - """A list of scopes allowed for the API key, e.g. 
`["api.model.request"]`""" - - -class DataAPIKeyCreated(BaseModel): - id: Optional[str] = None - """The tracking ID of the API key.""" - - data: Optional[DataAPIKeyCreatedData] = None - """The payload used to create the API key.""" - - -class DataAPIKeyDeleted(BaseModel): - id: Optional[str] = None - """The tracking ID of the API key.""" - - -class DataAPIKeyUpdatedChangesRequested(BaseModel): - scopes: Optional[List[str]] = None - """A list of scopes allowed for the API key, e.g. `["api.model.request"]`""" - - -class DataAPIKeyUpdated(BaseModel): - id: Optional[str] = None - """The tracking ID of the API key.""" - - changes_requested: Optional[DataAPIKeyUpdatedChangesRequested] = None - """The payload used to update the API key.""" - - -class DataInviteAccepted(BaseModel): - id: Optional[str] = None - """The ID of the invite.""" - - -class DataInviteDeleted(BaseModel): - id: Optional[str] = None - """The ID of the invite.""" - - -class DataInviteSentData(BaseModel): - email: Optional[str] = None - """The email invited to the organization.""" - - role: Optional[str] = None - """The role the email was invited to be. Is either `owner` or `member`.""" - - -class DataInviteSent(BaseModel): - id: Optional[str] = None - """The ID of the invite.""" - - data: Optional[DataInviteSentData] = None - """The payload used to create the invite.""" - - -class DataLoginFailed(BaseModel): - error_code: Optional[str] = None - """The error code of the failure.""" - - error_message: Optional[str] = None - """The error message of the failure.""" - - -class DataLogoutFailed(BaseModel): - error_code: Optional[str] = None - """The error code of the failure.""" - - error_message: Optional[str] = None - """The error message of the failure.""" - - -class DataOrganizationUpdatedChangesRequestedSettings(BaseModel): - threads_ui_visibility: Optional[str] = None - """ - Visibility of the threads page which shows messages created with the Assistants - API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`. - """ - - usage_dashboard_visibility: Optional[str] = None - """ - Visibility of the usage dashboard which shows activity and costs for your - organization. One of `ANY_ROLE` or `OWNERS`. 
- """ - - -class DataOrganizationUpdatedChangesRequested(BaseModel): - description: Optional[str] = None - """The organization description.""" - - name: Optional[str] = None - """The organization name.""" - - settings: Optional[DataOrganizationUpdatedChangesRequestedSettings] = None - - title: Optional[str] = None - """The organization title.""" - - -class DataOrganizationUpdated(BaseModel): - id: Optional[str] = None - """The organization ID.""" - - changes_requested: Optional[DataOrganizationUpdatedChangesRequested] = None - """The payload used to update the organization settings.""" - - -class DataProject(BaseModel): - id: Optional[str] = None - """The project ID.""" - - name: Optional[str] = None - """The project title.""" - - -class DataProjectArchived(BaseModel): - id: Optional[str] = None - """The project ID.""" - - -class DataProjectCreatedData(BaseModel): - name: Optional[str] = None - """The project name.""" - - title: Optional[str] = None - """The title of the project as seen on the dashboard.""" - - -class DataProjectCreated(BaseModel): - id: Optional[str] = None - """The project ID.""" - - data: Optional[DataProjectCreatedData] = None - """The payload used to create the project.""" - - -class DataProjectUpdatedChangesRequested(BaseModel): - title: Optional[str] = None - """The title of the project as seen on the dashboard.""" - - -class DataProjectUpdated(BaseModel): - id: Optional[str] = None - """The project ID.""" - - changes_requested: Optional[DataProjectUpdatedChangesRequested] = None - """The payload used to update the project.""" - - -class DataRateLimitDeleted(BaseModel): - id: Optional[str] = None - """The rate limit ID""" - - -class DataRateLimitUpdatedChangesRequested(BaseModel): - batch_1_day_max_input_tokens: Optional[int] = None - """The maximum batch input tokens per day. Only relevant for certain models.""" - - max_audio_megabytes_per_1_minute: Optional[int] = None - """The maximum audio megabytes per minute. Only relevant for certain models.""" - - max_images_per_1_minute: Optional[int] = None - """The maximum images per minute. Only relevant for certain models.""" - - max_requests_per_1_day: Optional[int] = None - """The maximum requests per day. Only relevant for certain models.""" - - max_requests_per_1_minute: Optional[int] = None - """The maximum requests per minute.""" - - max_tokens_per_1_minute: Optional[int] = None - """The maximum tokens per minute.""" - - -class DataRateLimitUpdated(BaseModel): - id: Optional[str] = None - """The rate limit ID""" - - changes_requested: Optional[DataRateLimitUpdatedChangesRequested] = None - """The payload used to update the rate limits.""" - - -class DataServiceAccountCreatedData(BaseModel): - role: Optional[str] = None - """The role of the service account. Is either `owner` or `member`.""" - - -class DataServiceAccountCreated(BaseModel): - id: Optional[str] = None - """The service account ID.""" - - data: Optional[DataServiceAccountCreatedData] = None - """The payload used to create the service account.""" - - -class DataServiceAccountDeleted(BaseModel): - id: Optional[str] = None - """The service account ID.""" - - -class DataServiceAccountUpdatedChangesRequested(BaseModel): - role: Optional[str] = None - """The role of the service account. 
Is either `owner` or `member`.""" - - -class DataServiceAccountUpdated(BaseModel): - id: Optional[str] = None - """The service account ID.""" - - changes_requested: Optional[DataServiceAccountUpdatedChangesRequested] = None - """The payload used to update the service account.""" - - -class DataUserAddedData(BaseModel): - role: Optional[str] = None - """The role of the user. Is either `owner` or `member`.""" - - -class DataUserAdded(BaseModel): - id: Optional[str] = None - """The user ID.""" - - data: Optional[DataUserAddedData] = None - """The payload used to add the user to the project.""" - - -class DataUserDeleted(BaseModel): - id: Optional[str] = None - """The user ID.""" - - -class DataUserUpdatedChangesRequested(BaseModel): - role: Optional[str] = None - """The role of the user. Is either `owner` or `member`.""" - - -class DataUserUpdated(BaseModel): - id: Optional[str] = None - """The project ID.""" - - changes_requested: Optional[DataUserUpdatedChangesRequested] = None - """The payload used to update the user.""" - - -class Data(BaseModel): - id: str - """The ID of this log.""" - - actor: DataActor - """The actor who performed the audit logged action.""" - - effective_at: int - """The Unix timestamp (in seconds) of the event.""" - - type: AuditLogEventType - """The event type.""" - - api_key_created: Optional[DataAPIKeyCreated] = FieldInfo(alias="api_key.created", default=None) - """The details for events with this `type`.""" - - api_key_deleted: Optional[DataAPIKeyDeleted] = FieldInfo(alias="api_key.deleted", default=None) - """The details for events with this `type`.""" - - api_key_updated: Optional[DataAPIKeyUpdated] = FieldInfo(alias="api_key.updated", default=None) - """The details for events with this `type`.""" - - invite_accepted: Optional[DataInviteAccepted] = FieldInfo(alias="invite.accepted", default=None) - """The details for events with this `type`.""" - - invite_deleted: Optional[DataInviteDeleted] = FieldInfo(alias="invite.deleted", default=None) - """The details for events with this `type`.""" - - invite_sent: Optional[DataInviteSent] = FieldInfo(alias="invite.sent", default=None) - """The details for events with this `type`.""" - - login_failed: Optional[DataLoginFailed] = FieldInfo(alias="login.failed", default=None) - """The details for events with this `type`.""" - - logout_failed: Optional[DataLogoutFailed] = FieldInfo(alias="logout.failed", default=None) - """The details for events with this `type`.""" - - organization_updated: Optional[DataOrganizationUpdated] = FieldInfo(alias="organization.updated", default=None) - """The details for events with this `type`.""" - - project: Optional[DataProject] = None - """The project that the action was scoped to. - - Absent for actions not scoped to projects.
- """ - - project_archived: Optional[DataProjectArchived] = FieldInfo(alias="project.archived", default=None) - """The details for events with this `type`.""" - - project_created: Optional[DataProjectCreated] = FieldInfo(alias="project.created", default=None) - """The details for events with this `type`.""" - - project_updated: Optional[DataProjectUpdated] = FieldInfo(alias="project.updated", default=None) - """The details for events with this `type`.""" - - rate_limit_deleted: Optional[DataRateLimitDeleted] = FieldInfo(alias="rate_limit.deleted", default=None) - """The details for events with this `type`.""" - - rate_limit_updated: Optional[DataRateLimitUpdated] = FieldInfo(alias="rate_limit.updated", default=None) - """The details for events with this `type`.""" - - service_account_created: Optional[DataServiceAccountCreated] = FieldInfo( - alias="service_account.created", default=None - ) - """The details for events with this `type`.""" - - service_account_deleted: Optional[DataServiceAccountDeleted] = FieldInfo( - alias="service_account.deleted", default=None - ) - """The details for events with this `type`.""" - - service_account_updated: Optional[DataServiceAccountUpdated] = FieldInfo( - alias="service_account.updated", default=None - ) - """The details for events with this `type`.""" - - user_added: Optional[DataUserAdded] = FieldInfo(alias="user.added", default=None) - """The details for events with this `type`.""" - - user_deleted: Optional[DataUserDeleted] = FieldInfo(alias="user.deleted", default=None) - """The details for events with this `type`.""" - - user_updated: Optional[DataUserUpdated] = FieldInfo(alias="user.updated", default=None) - """The details for events with this `type`.""" - - -class OrganizationListAuditLogsResponse(BaseModel): - data: List[Data] - - first_id: str - - has_more: bool - - last_id: str - - object: Literal["list"] diff --git a/src/digitalocean_genai_sdk/types/output_message.py b/src/digitalocean_genai_sdk/types/output_message.py deleted file mode 100644 index 4db6e72e..00000000 --- a/src/digitalocean_genai_sdk/types/output_message.py +++ /dev/null @@ -1,104 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union -from typing_extensions import Literal, TypeAlias - -from .._models import BaseModel - -__all__ = [ - "OutputMessage", - "Content", - "ContentOutputText", - "ContentOutputTextAnnotation", - "ContentOutputTextAnnotationFileCitation", - "ContentOutputTextAnnotationURLCitation", - "ContentOutputTextAnnotationFilePath", - "ContentRefusal", -] - - -class ContentOutputTextAnnotationFileCitation(BaseModel): - file_id: str - """The ID of the file.""" - - index: int - """The index of the file in the list of files.""" - - type: Literal["file_citation"] - """The type of the file citation. Always `file_citation`.""" - - -class ContentOutputTextAnnotationURLCitation(BaseModel): - end_index: int - """The index of the last character of the URL citation in the message.""" - - start_index: int - """The index of the first character of the URL citation in the message.""" - - title: str - """The title of the web resource.""" - - type: Literal["url_citation"] - """The type of the URL citation. 
Always `url_citation`.""" - - url: str - """The URL of the web resource.""" - - -class ContentOutputTextAnnotationFilePath(BaseModel): - file_id: str - """The ID of the file.""" - - index: int - """The index of the file in the list of files.""" - - type: Literal["file_path"] - """The type of the file path. Always `file_path`.""" - - -ContentOutputTextAnnotation: TypeAlias = Union[ - ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath -] - - -class ContentOutputText(BaseModel): - annotations: List[ContentOutputTextAnnotation] - """The annotations of the text output.""" - - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -class ContentRefusal(BaseModel): - refusal: str - """The refusal explanation from the model.""" - - type: Literal["refusal"] - """The type of the refusal. Always `refusal`.""" - - -Content: TypeAlias = Union[ContentOutputText, ContentRefusal] - - -class OutputMessage(BaseModel): - id: str - """The unique ID of the output message.""" - - content: List[Content] - """The content of the output message.""" - - role: Literal["assistant"] - """The role of the output message. Always `assistant`.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the message input. - - One of `in_progress`, `completed`, or `incomplete`. Populated when input items - are returned via API. - """ - - type: Literal["message"] - """The type of the output message. Always `message`.""" diff --git a/src/digitalocean_genai_sdk/types/output_message_param.py b/src/digitalocean_genai_sdk/types/output_message_param.py deleted file mode 100644 index 83f13e18..00000000 --- a/src/digitalocean_genai_sdk/types/output_message_param.py +++ /dev/null @@ -1,104 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = [ - "OutputMessageParam", - "Content", - "ContentOutputText", - "ContentOutputTextAnnotation", - "ContentOutputTextAnnotationFileCitation", - "ContentOutputTextAnnotationURLCitation", - "ContentOutputTextAnnotationFilePath", - "ContentRefusal", -] - - -class ContentOutputTextAnnotationFileCitation(TypedDict, total=False): - file_id: Required[str] - """The ID of the file.""" - - index: Required[int] - """The index of the file in the list of files.""" - - type: Required[Literal["file_citation"]] - """The type of the file citation. Always `file_citation`.""" - - -class ContentOutputTextAnnotationURLCitation(TypedDict, total=False): - end_index: Required[int] - """The index of the last character of the URL citation in the message.""" - - start_index: Required[int] - """The index of the first character of the URL citation in the message.""" - - title: Required[str] - """The title of the web resource.""" - - type: Required[Literal["url_citation"]] - """The type of the URL citation. Always `url_citation`.""" - - url: Required[str] - """The URL of the web resource.""" - - -class ContentOutputTextAnnotationFilePath(TypedDict, total=False): - file_id: Required[str] - """The ID of the file.""" - - index: Required[int] - """The index of the file in the list of files.""" - - type: Required[Literal["file_path"]] - """The type of the file path.
Always `file_path`.""" - - -ContentOutputTextAnnotation: TypeAlias = Union[ - ContentOutputTextAnnotationFileCitation, ContentOutputTextAnnotationURLCitation, ContentOutputTextAnnotationFilePath -] - - -class ContentOutputText(TypedDict, total=False): - annotations: Required[Iterable[ContentOutputTextAnnotation]] - """The annotations of the text output.""" - - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. Always `output_text`.""" - - -class ContentRefusal(TypedDict, total=False): - refusal: Required[str] - """The refusal explanation from the model.""" - - type: Required[Literal["refusal"]] - """The type of the refusal. Always `refusal`.""" - - -Content: TypeAlias = Union[ContentOutputText, ContentRefusal] - - -class OutputMessageParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the output message.""" - - content: Required[Iterable[Content]] - """The content of the output message.""" - - role: Required[Literal["assistant"]] - """The role of the output message. Always `assistant`.""" - - status: Required[Literal["in_progress", "completed", "incomplete"]] - """The status of the message input. - - One of `in_progress`, `completed`, or `incomplete`. Populated when input items - are returned via API. - """ - - type: Required[Literal["message"]] - """The type of the output message. Always `message`.""" diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_session_params.py deleted file mode 100644 index df105bac..00000000 --- a/src/digitalocean_genai_sdk/types/realtime_create_session_params.py +++ /dev/null @@ -1,230 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, TypedDict - -from .voice_ids_shared_param import VoiceIDsSharedParam - -__all__ = [ - "RealtimeCreateSessionParams", - "InputAudioNoiseReduction", - "InputAudioTranscription", - "Tool", - "TurnDetection", -] - - -class RealtimeCreateSessionParams(TypedDict, total=False): - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. - - Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must - be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian - byte order. - """ - - input_audio_noise_reduction: InputAudioNoiseReduction - """Configuration for input audio noise reduction. - - This can be set to `null` to turn off. Noise reduction filters audio added to - the input audio buffer before it is sent to VAD and the model. Filtering the - audio can improve VAD and turn detection accuracy (reducing false positives) and - model performance by improving perception of the input audio. - """ - - input_audio_transcription: InputAudioTranscription - """ - Configuration for input audio transcription, defaults to off and can be set to - `null` to turn off once on. Input audio transcription is not native to the - model, since the model consumes audio directly. Transcription runs - asynchronously through - [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) - and should be treated as guidance of input audio content rather than precisely - what the model heard.
The client can optionally set the language and prompt for - transcription, these offer additional guidance to the transcription service. - """ - - instructions: str - """The default system instructions (i.e. - - system message) prepended to model calls. This field allows the client to guide - the model on desired responses. The model can be instructed on response content - and format, (e.g. "be extremely succinct", "act friendly", "here are examples of - good responses") and on audio behavior (e.g. "talk quickly", "inject emotion - into your voice", "laugh frequently"). The instructions are not guaranteed to be - followed by the model, but they provide guidance to the model on the desired - behavior. - - Note that the server sets default instructions which will be used if this field - is not set and are visible in the `session.created` event at the start of the - session. - """ - - max_response_output_tokens: Union[int, Literal["inf"]] - """ - Maximum number of output tokens for a single assistant response, inclusive of - tool calls. Provide an integer between 1 and 4096 to limit output tokens, or - `inf` for the maximum available tokens for a given model. Defaults to `inf`. - """ - - modalities: List[Literal["text", "audio"]] - """The set of modalities the model can respond with. - - To disable audio, set this to ["text"]. - """ - - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - """The Realtime model used for this session.""" - - output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. - - Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is - sampled at a rate of 24kHz. - """ - - temperature: float - """Sampling temperature for the model, limited to [0.6, 1.2]. - - For audio models a temperature of 0.8 is highly recommended for best - performance. - """ - - tool_choice: str - """How the model chooses tools. - - Options are `auto`, `none`, `required`, or specify a function. - """ - - tools: Iterable[Tool] - """Tools (functions) available to the model.""" - - turn_detection: TurnDetection - """Configuration for turn detection, either Server VAD or Semantic VAD. - - This can be set to `null` to turn off, in which case the client must manually - trigger model response. Server VAD means that the model will detect the start - and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjunction - with VAD) to semantically estimate whether the user has finished speaking, then - dynamically sets a timeout based on this probability. For example, if user audio - trails off with "uhhm", the model will score a low probability of turn end and - wait longer for the user to continue speaking. This can be useful for more - natural conversations, but may have a higher latency. - """ - - voice: VoiceIDsSharedParam - """The voice the model uses to respond. - - Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. - """ - - -class InputAudioNoiseReduction(TypedDict, total=False): - type: Literal["near_field", "far_field"] - """Type of noise reduction.
- - `near_field` is for close-talking microphones such as headphones, `far_field` is - for far-field microphones such as laptop or conference room microphones. - """ - - -class InputAudioTranscription(TypedDict, total=False): - language: str - """The language of the input audio. - - Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - """ - - model: str - """ - The model to use for transcription, current options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1`. - """ - - prompt: str - """ - An optional text to guide the model's style or continue a previous audio - segment. For `whisper-1`, the - [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For - `gpt-4o-transcribe` models, the prompt is a free text string, for example - "expect words related to technology". - """ - - -class Tool(TypedDict, total=False): - description: str - """ - The description of the function, including guidance on when and how to call it, - and guidance about what to tell the user when calling (if anything). - """ - - name: str - """The name of the function.""" - - parameters: object - """Parameters of the function in JSON Schema.""" - - type: Literal["function"] - """The type of the tool, i.e. `function`.""" - - -class TurnDetection(TypedDict, total=False): - create_response: bool - """ - Whether or not to automatically generate a response when a VAD stop event - occurs. - """ - - eagerness: Literal["low", "medium", "high", "auto"] - """Used only for `semantic_vad` mode. - - The eagerness of the model to respond. `low` will wait longer for the user to - continue speaking, `high` will respond more quickly. `auto` is the default and - is equivalent to `medium`. - """ - - interrupt_response: bool - """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. - """ - - prefix_padding_ms: int - """Used only for `server_vad` mode. - - Amount of audio to include before the VAD detected speech (in milliseconds). - Defaults to 300ms. - """ - - silence_duration_ms: int - """Used only for `server_vad` mode. - - Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - With shorter values the model will respond more quickly, but may jump in on - short pauses from the user. - """ - - threshold: float - """Used only for `server_vad` mode. - - Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - threshold will require louder audio to activate the model, and thus might - perform better in noisy environments. - """ - - type: Literal["server_vad", "semantic_vad"] - """Type of turn detection.""" diff --git a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_session_response.py deleted file mode 100644 index 1b7bc03c..00000000 --- a/src/digitalocean_genai_sdk/types/realtime_create_session_response.py +++ /dev/null @@ -1,151 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Union, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .voice_ids_shared import VoiceIDsShared - -__all__ = ["RealtimeCreateSessionResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"] - - -class ClientSecret(BaseModel): - expires_at: int - """Timestamp for when the token expires. - - Currently, all tokens expire after one minute. - """ - - value: str - """ - Ephemeral key usable in client environments to authenticate connections to the - Realtime API. Use this in client-side environments rather than a standard API - token, which should only be used server-side. - """ - - -class InputAudioTranscription(BaseModel): - model: Optional[str] = None - """ - The model to use for transcription, `whisper-1` is the only currently supported - model. - """ - - -class Tool(BaseModel): - description: Optional[str] = None - """ - The description of the function, including guidance on when and how to call it, - and guidance about what to tell the user when calling (if anything). - """ - - name: Optional[str] = None - """The name of the function.""" - - parameters: Optional[object] = None - """Parameters of the function in JSON Schema.""" - - type: Optional[Literal["function"]] = None - """The type of the tool, i.e. `function`.""" - - -class TurnDetection(BaseModel): - prefix_padding_ms: Optional[int] = None - """Amount of audio to include before the VAD detected speech (in milliseconds). - - Defaults to 300ms. - """ - - silence_duration_ms: Optional[int] = None - """Duration of silence to detect speech stop (in milliseconds). - - Defaults to 500ms. With shorter values the model will respond more quickly, but - may jump in on short pauses from the user. - """ - - threshold: Optional[float] = None - """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. - - A higher threshold will require louder audio to activate the model, and thus - might perform better in noisy environments. - """ - - type: Optional[str] = None - """Type of turn detection, only `server_vad` is currently supported.""" - - -class RealtimeCreateSessionResponse(BaseModel): - client_secret: ClientSecret - """Ephemeral key returned by the API.""" - - input_audio_format: Optional[str] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - - input_audio_transcription: Optional[InputAudioTranscription] = None - """ - Configuration for input audio transcription, defaults to off and can be set to - `null` to turn off once on. Input audio transcription is not native to the - model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. - """ - - instructions: Optional[str] = None - """The default system instructions (i.e. - - system message) prepended to model calls. This field allows the client to guide - the model on desired responses. The model can be instructed on response content - and format, (e.g. "be extremely succinct", "act friendly", "here are examples of - good responses") and on audio behavior (e.g. "talk quickly", "inject emotion - into your voice", "laugh frequently"). The instructions are not guaranteed to be - followed by the model, but they provide guidance to the model on the desired - behavior. 
- - Note that the server sets default instructions which will be used if this field - is not set and are visible in the `session.created` event at the start of the - session. - """ - - max_response_output_tokens: Union[int, Literal["inf"], None] = None - """ - Maximum number of output tokens for a single assistant response, inclusive of - tool calls. Provide an integer between 1 and 4096 to limit output tokens, or - `inf` for the maximum available tokens for a given model. Defaults to `inf`. - """ - - modalities: Optional[List[Literal["text", "audio"]]] = None - """The set of modalities the model can respond with. - - To disable audio, set this to ["text"]. - """ - - output_audio_format: Optional[str] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - - temperature: Optional[float] = None - """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" - - tool_choice: Optional[str] = None - """How the model chooses tools. - - Options are `auto`, `none`, `required`, or specify a function. - """ - - tools: Optional[List[Tool]] = None - """Tools (functions) available to the model.""" - - turn_detection: Optional[TurnDetection] = None - """Configuration for turn detection. - - Can be set to `null` to turn off. Server VAD means that the model will detect - the start and end of speech based on audio volume and respond at the end of user - speech. - """ - - voice: Optional[VoiceIDsShared] = None - """The voice the model uses to respond. - - Voice cannot be changed during the session once the model has responded with - audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `sage`, `shimmer`, and `verse`. - """
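For reference, a minimal sketch of a `RealtimeCreateSessionParams` payload pairing Server VAD turn detection with the session fields defined above; all values are illustrative:

from digitalocean_genai_sdk.types.realtime_create_session_params import RealtimeCreateSessionParams

# Every key and literal mirrors the TypedDict definitions above; the numeric
# turn-detection values are simply the documented defaults spelled out.
session: RealtimeCreateSessionParams = {
    "model": "gpt-4o-realtime-preview",
    "modalities": ["text", "audio"],
    "voice": "alloy",
    "input_audio_format": "pcm16",
    "max_response_output_tokens": "inf",
    "turn_detection": {
        "type": "server_vad",
        "threshold": 0.5,          # VAD activation threshold (0.0 to 1.0)
        "prefix_padding_ms": 300,  # audio included before detected speech
        "silence_duration_ms": 500,
    },
}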
diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py deleted file mode 100644 index 21912679..00000000 --- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_params.py +++ /dev/null @@ -1,149 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, TypedDict - -__all__ = [ - "RealtimeCreateTranscriptionSessionParams", - "InputAudioNoiseReduction", - "InputAudioTranscription", - "TurnDetection", -] - - -class RealtimeCreateTranscriptionSessionParams(TypedDict, total=False): - include: List[str] - """The set of items to include in the transcription. Currently available items are: - - - `item.input_audio_transcription.logprobs` - """ - - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. - - Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must - be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian - byte order. - """ - - input_audio_noise_reduction: InputAudioNoiseReduction - """Configuration for input audio noise reduction. - - This can be set to `null` to turn off. Noise reduction filters audio added to - the input audio buffer before it is sent to VAD and the model. Filtering the - audio can improve VAD and turn detection accuracy (reducing false positives) and - model performance by improving perception of the input audio. - """ - - input_audio_transcription: InputAudioTranscription - """Configuration for input audio transcription. - - The client can optionally set the language and prompt for transcription, these - offer additional guidance to the transcription service. - """ - - modalities: List[Literal["text", "audio"]] - """The set of modalities the model can respond with. - - To disable audio, set this to ["text"]. - """ - - turn_detection: TurnDetection - """Configuration for turn detection, either Server VAD or Semantic VAD. - - This can be set to `null` to turn off, in which case the client must manually - trigger model response. Server VAD means that the model will detect the start - and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjunction - with VAD) to semantically estimate whether the user has finished speaking, then - dynamically sets a timeout based on this probability. For example, if user audio - trails off with "uhhm", the model will score a low probability of turn end and - wait longer for the user to continue speaking. This can be useful for more - natural conversations, but may have a higher latency. - """ - - -class InputAudioNoiseReduction(TypedDict, total=False): - type: Literal["near_field", "far_field"] - """Type of noise reduction. - - `near_field` is for close-talking microphones such as headphones, `far_field` is - for far-field microphones such as laptop or conference room microphones. - """ - - -class InputAudioTranscription(TypedDict, total=False): - language: str - """The language of the input audio. - - Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - """ - - model: Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"] - """ - The model to use for transcription, current options are `gpt-4o-transcribe`, - `gpt-4o-mini-transcribe`, and `whisper-1`. - """ - - prompt: str - """ - An optional text to guide the model's style or continue a previous audio - segment. For `whisper-1`, the - [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For - `gpt-4o-transcribe` models, the prompt is a free text string, for example - "expect words related to technology". - """ - - -class TurnDetection(TypedDict, total=False): - create_response: bool - """Whether or not to automatically generate a response when a VAD stop event - occurs. - - Not available for transcription sessions. - """ - - eagerness: Literal["low", "medium", "high", "auto"] - """Used only for `semantic_vad` mode. - - The eagerness of the model to respond. `low` will wait longer for the user to - continue speaking, `high` will respond more quickly. `auto` is the default and - is equivalent to `medium`. - """ - - interrupt_response: bool - """ - Whether or not to automatically interrupt any ongoing response with output to - the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. Not available for transcription sessions. - """ - - prefix_padding_ms: int - """Used only for `server_vad` mode. - - Amount of audio to include before the VAD detected speech (in milliseconds). - Defaults to 300ms. - """ - - silence_duration_ms: int - """Used only for `server_vad` mode. - - Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - With shorter values the model will respond more quickly, but may jump in on - short pauses from the user. - """ - - threshold: float - """Used only for `server_vad` mode.
- - Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - threshold will require louder audio to activate the model, and thus might - perform better in noisy environments. - """ - - type: Literal["server_vad", "semantic_vad"] - """Type of turn detection.""" diff --git a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py b/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py deleted file mode 100644 index bbd0b9de..00000000 --- a/src/digitalocean_genai_sdk/types/realtime_create_transcription_session_response.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["RealtimeCreateTranscriptionSessionResponse", "ClientSecret", "InputAudioTranscription", "TurnDetection"] - - -class ClientSecret(BaseModel): - expires_at: int - """Timestamp for when the token expires. - - Currently, all tokens expire after one minute. - """ - - value: str - """ - Ephemeral key usable in client environments to authenticate connections to the - Realtime API. Use this in client-side environments rather than a standard API - token, which should only be used server-side. - """ - - -class InputAudioTranscription(BaseModel): - language: Optional[str] = None - """The language of the input audio. - - Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - format will improve accuracy and latency. - """ - - model: Optional[Literal["gpt-4o-transcribe", "gpt-4o-mini-transcribe", "whisper-1"]] = None - """The model to use for transcription. - - Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`. - """ - - prompt: Optional[str] = None - """An optional text to guide the model's style or continue a previous audio - segment. - - The [prompt](/docs/guides/speech-to-text#prompting) should match the audio - language. - """ - - -class TurnDetection(BaseModel): - prefix_padding_ms: Optional[int] = None - """Amount of audio to include before the VAD detected speech (in milliseconds). - - Defaults to 300ms. - """ - - silence_duration_ms: Optional[int] = None - """Duration of silence to detect speech stop (in milliseconds). - - Defaults to 500ms. With shorter values the model will respond more quickly, but - may jump in on short pauses from the user. - """ - - threshold: Optional[float] = None - """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. - - A higher threshold will require louder audio to activate the model, and thus - might perform better in noisy environments. - """ - - type: Optional[str] = None - """Type of turn detection, only `server_vad` is currently supported.""" - - -class RealtimeCreateTranscriptionSessionResponse(BaseModel): - client_secret: ClientSecret - """Ephemeral key returned by the API. - - Only present when the session is created on the server via REST API. - """ - - input_audio_format: Optional[str] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - - input_audio_transcription: Optional[InputAudioTranscription] = None - """Configuration of the transcription model.""" - - modalities: Optional[List[Literal["text", "audio"]]] = None - """The set of modalities the model can respond with. - - To disable audio, set this to ["text"]. 
- """ - - turn_detection: Optional[TurnDetection] = None - """Configuration for turn detection. - - Can be set to `null` to turn off. Server VAD means that the model will detect - the start and end of speech based on audio volume and respond at the end of user - speech. - """ diff --git a/src/digitalocean_genai_sdk/types/reasoning_effort.py b/src/digitalocean_genai_sdk/types/reasoning_effort.py deleted file mode 100644 index ace21b67..00000000 --- a/src/digitalocean_genai_sdk/types/reasoning_effort.py +++ /dev/null @@ -1,8 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal, TypeAlias - -__all__ = ["ReasoningEffort"] - -ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] diff --git a/src/digitalocean_genai_sdk/types/reasoning_item.py b/src/digitalocean_genai_sdk/types/reasoning_item.py deleted file mode 100644 index 28a64183..00000000 --- a/src/digitalocean_genai_sdk/types/reasoning_item.py +++ /dev/null @@ -1,36 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ReasoningItem", "Summary"] - - -class Summary(BaseModel): - text: str - """ - A short summary of the reasoning used by the model when generating the response. - """ - - type: Literal["summary_text"] - """The type of the object. Always `summary_text`.""" - - -class ReasoningItem(BaseModel): - id: str - """The unique identifier of the reasoning content.""" - - summary: List[Summary] - """Reasoning text contents.""" - - type: Literal["reasoning"] - """The type of the object. Always `reasoning`.""" - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ diff --git a/src/digitalocean_genai_sdk/types/reasoning_item_param.py b/src/digitalocean_genai_sdk/types/reasoning_item_param.py deleted file mode 100644 index 4d2a0504..00000000 --- a/src/digitalocean_genai_sdk/types/reasoning_item_param.py +++ /dev/null @@ -1,36 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ReasoningItemParam", "Summary"] - - -class Summary(TypedDict, total=False): - text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ - - type: Required[Literal["summary_text"]] - """The type of the object. Always `summary_text`.""" - - -class ReasoningItemParam(TypedDict, total=False): - id: Required[str] - """The unique identifier of the reasoning content.""" - - summary: Required[Iterable[Summary]] - """Reasoning text contents.""" - - type: Required[Literal["reasoning"]] - """The type of the object. Always `reasoning`.""" - - status: Literal["in_progress", "completed", "incomplete"] - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. 
- """ diff --git a/src/digitalocean_genai_sdk/types/response.py b/src/digitalocean_genai_sdk/types/response.py deleted file mode 100644 index 523eedfc..00000000 --- a/src/digitalocean_genai_sdk/types/response.py +++ /dev/null @@ -1,142 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias - -from .._utils import PropertyInfo -from .._models import BaseModel -from .output_message import OutputMessage -from .reasoning_item import ReasoningItem -from .computer_tool_call import ComputerToolCall -from .function_tool_call import FunctionToolCall -from .response_properties import ResponseProperties -from .web_search_tool_call import WebSearchToolCall -from .file_search_tool_call import FileSearchToolCall -from .model_response_properties import ModelResponseProperties - -__all__ = [ - "Response", - "ResponseError", - "ResponseIncompleteDetails", - "ResponseOutput", - "ResponseUsage", - "ResponseUsageInputTokensDetails", - "ResponseUsageOutputTokensDetails", -] - - -class ResponseError(BaseModel): - code: Literal[ - "server_error", - "rate_limit_exceeded", - "invalid_prompt", - "vector_store_timeout", - "invalid_image", - "invalid_image_format", - "invalid_base64_image", - "invalid_image_url", - "image_too_large", - "image_too_small", - "image_parse_error", - "image_content_policy_violation", - "invalid_image_mode", - "image_file_too_large", - "unsupported_image_media_type", - "empty_image_file", - "failed_to_download_image", - "image_file_not_found", - ] - """The error code for the response.""" - - message: str - """A human-readable description of the error.""" - - -class ResponseIncompleteDetails(BaseModel): - reason: Optional[Literal["max_output_tokens", "content_filter"]] = None - """The reason why the response is incomplete.""" - - -ResponseOutput: TypeAlias = Annotated[ - Union[OutputMessage, FileSearchToolCall, FunctionToolCall, WebSearchToolCall, ComputerToolCall, ReasoningItem], - PropertyInfo(discriminator="type"), -] - - -class ResponseUsageInputTokensDetails(BaseModel): - cached_tokens: int - """The number of tokens that were retrieved from the cache. - - [More on prompt caching](/docs/guides/prompt-caching). - """ - - -class ResponseUsageOutputTokensDetails(BaseModel): - reasoning_tokens: int - """The number of reasoning tokens.""" - - -class ResponseUsage(BaseModel): - input_tokens: int - """The number of input tokens.""" - - input_tokens_details: ResponseUsageInputTokensDetails - """A detailed breakdown of the input tokens.""" - - output_tokens: int - """The number of output tokens.""" - - output_tokens_details: ResponseUsageOutputTokensDetails - """A detailed breakdown of the output tokens.""" - - total_tokens: int - """The total number of tokens used.""" - - -class Response(ModelResponseProperties, ResponseProperties): - id: str - """Unique identifier for this Response.""" - - created_at: float - """Unix timestamp (in seconds) of when this Response was created.""" - - error: Optional[ResponseError] = None - """An error object returned when the model fails to generate a Response.""" - - incomplete_details: Optional[ResponseIncompleteDetails] = None - """Details about why the response is incomplete.""" - - object: Literal["response"] - """The object type of this resource - always set to `response`.""" - - output: List[ResponseOutput] - """An array of content items generated by the model. 
- - - The length and order of items in the `output` array are dependent on the - model's response. - - Rather than accessing the first item in the `output` array and assuming it's - an `assistant` message with the content generated by the model, you might - consider using the `output_text` property where supported in SDKs. - """ - - parallel_tool_calls: bool - """Whether to allow the model to run tool calls in parallel.""" - - output_text: Optional[str] = None - """ - SDK-only convenience property that contains the aggregated text output from all - `output_text` items in the `output` array, if any are present. Supported in the - Python and JavaScript SDKs. - """ - - status: Optional[Literal["completed", "failed", "in_progress", "incomplete"]] = None - """The status of the response generation. - - One of `completed`, `failed`, `in_progress`, or `incomplete`. - """ - - usage: Optional[ResponseUsage] = None - """ - Represents token usage details including input tokens, output tokens, a - breakdown of output tokens, and the total tokens used. - """
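For reference, a minimal sketch of walking the `Response` model above to recover its text output, assuming the discriminated `output` union narrows on each item's `type` field:

from digitalocean_genai_sdk.types.response import Response

def collect_text(response: Response) -> str:
    # Prefer the SDK-only convenience property when the SDK populates it.
    if response.output_text is not None:
        return response.output_text
    # Otherwise walk the output items, keeping only assistant message text
    # blocks and skipping tool calls, reasoning items, and refusals.
    parts: list[str] = []
    for item in response.output:
        if item.type == "message":
            for block in item.content:
                if block.type == "output_text":
                    parts.append(block.text)
    return "".join(parts)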
diff --git a/src/digitalocean_genai_sdk/types/response_create_params.py b/src/digitalocean_genai_sdk/types/response_create_params.py deleted file mode 100644 index 878e53a5..00000000 --- a/src/digitalocean_genai_sdk/types/response_create_params.py +++ /dev/null @@ -1,494 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .includable import Includable -from .reasoning_effort import ReasoningEffort -from .input_content_param import InputContentParam -from .input_message_param import InputMessageParam -from .output_message_param import OutputMessageParam -from .reasoning_item_param import ReasoningItemParam -from .compound_filter_param import CompoundFilterParam -from .comparison_filter_param import ComparisonFilterParam -from .computer_tool_call_param import ComputerToolCallParam -from .function_tool_call_param import FunctionToolCallParam -from .web_search_tool_call_param import WebSearchToolCallParam -from .file_search_tool_call_param import FileSearchToolCallParam -from .chat.web_search_context_size import WebSearchContextSize -from .chat.web_search_location_param import WebSearchLocationParam -from .chat.response_format_text_param import ResponseFormatTextParam -from .computer_tool_call_output_param import ComputerToolCallOutputParam -from .function_tool_call_output_param import FunctionToolCallOutputParam -from .chat.response_format_json_object_param import ResponseFormatJsonObjectParam - -__all__ = [ - "ResponseCreateParams", - "InputInputItemList", - "InputInputItemListMessage", - "InputInputItemListItemReference", - "Reasoning", - "Text", - "TextFormat", - "TextFormatTextResponseFormatJsonSchema", - "ToolChoice", - "ToolChoiceToolChoiceTypes", - "ToolChoiceToolChoiceFunction", - "Tool", - "ToolFileSearchTool", - "ToolFileSearchToolFilters", - "ToolFileSearchToolRankingOptions", - "ToolFunctionTool", - "ToolComputerTool", - "ToolWebSearchTool", - "ToolWebSearchToolUserLocation", -] - - -class ResponseCreateParams(TypedDict, total=False): - input: Required[Union[str, Iterable[InputInputItemList]]] - """Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Image inputs](/docs/guides/images) - - [File inputs](/docs/guides/pdf-files) - - [Conversation state](/docs/guides/conversation-state) - - [Function calling](/docs/guides/function-calling) - """ - - model: Required[ - Union[ - Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - "o1-pro", - "o1-pro-2025-03-19", - "computer-use-preview", - "computer-use-preview-2025-03-11", - ], - str, - ] - ] - """Model ID used to generate the response, like `gpt-4o` or `o1`. - - OpenAI offers a wide range of models with different capabilities, performance - characteristics, and price points. Refer to the [model guide](/docs/models) to - browse and compare available models. - """ - - include: Optional[List[Includable]] - """Specify additional output data to include in the model response. - - Currently supported values are: - - - `file_search_call.results`: Include the search results of the file search tool - call. - - `message.input_image.image_url`: Include image urls from the input message. - - `computer_call_output.output.image_url`: Include image urls from the computer - call output. - """ - - instructions: Optional[str] - """ - Inserts a system (or developer) message as the first item in the model's - context. - - When using along with `previous_response_id`, the instructions from a previous - response will not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. - """ - - max_output_tokens: Optional[int] - """ - An upper bound for the number of tokens that can be generated for a response, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - parallel_tool_calls: Optional[bool] - """Whether to allow the model to run tool calls in parallel.""" - - previous_response_id: Optional[str] - """The unique ID of the previous response to the model. - - Use this to create multi-turn conversations. Learn more about - [conversation state](/docs/guides/conversation-state).
- """ - - reasoning: Optional[Reasoning] - """**o-series models only** - - Configuration options for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). - """ - - store: Optional[bool] - """Whether to store the generated model response for later retrieval via API.""" - - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using - [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). - See the [Streaming section below](/docs/api-reference/responses-streaming) for - more information. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. We generally recommend altering - this or `top_p` but not both. - """ - - text: Text - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Structured Outputs](/docs/guides/structured-outputs) - """ - - tool_choice: ToolChoice - """ - How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. - """ - - tools: Iterable[Tool] - """An array of tools the model may call while generating a response. - - You can specify which tool to use by setting the `tool_choice` parameter. - - The two categories of tools you can provide the model are: - - - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - capabilities, like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search). Learn more about - [built-in tools](/docs/guides/tools). - - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about - [function calling](/docs/guides/function-calling). - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - """ - - truncation: Optional[Literal["auto", "disabled"]] - """The truncation strategy to use for the model response. - - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size - for a model, the request will fail with a 400 error. - """ - - user: str - """ - A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). - """ - - -class InputInputItemListMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[InputContentParam]]] - """ - Text, image, or audio input to the model, used to generate a response. Can also - contain previous assistant responses. - """ - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. 
- - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" - - -class InputInputItemListItemReference(TypedDict, total=False): - id: Required[str] - """The ID of the item to reference.""" - - type: Required[Literal["item_reference"]] - """The type of item to reference. Always `item_reference`.""" - - -InputInputItemList: TypeAlias = Union[ - InputInputItemListMessage, - InputMessageParam, - OutputMessageParam, - FileSearchToolCallParam, - ComputerToolCallParam, - ComputerToolCallOutputParam, - WebSearchToolCallParam, - FunctionToolCallParam, - FunctionToolCallOutputParam, - ReasoningItemParam, - InputInputItemListItemReference, -] - - -class Reasoning(TypedDict, total=False): - effort: Optional[ReasoningEffort] - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - """ - - generate_summary: Optional[Literal["concise", "detailed"]] - """**computer_use_preview only** - - A summary of the reasoning performed by the model. This can be useful for - debugging and understanding the model's reasoning process. One of `concise` or - `detailed`. - """ - - -class TextFormatTextResponseFormatJsonSchema(TypedDict, total=False): - schema: Required[Dict[str, object]] - """ - The schema for the response format, described as a JSON Schema object. Learn how - to build JSON schemas [here](https://json-schema.org/). - """ - - type: Required[Literal["json_schema"]] - """The type of response format being defined. Always `json_schema`.""" - - description: str - """ - A description of what the response format is for, used by the model to determine - how to respond in the format. - """ - - name: str - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - strict: Optional[bool] - """ - Whether to enable strict schema adherence when generating the output. If set to - true, the model will always follow the exact schema defined in the `schema` - field. Only a subset of JSON Schema is supported when `strict` is `true`. To - learn more, read the - [Structured Outputs guide](/docs/guides/structured-outputs). - """ - - -TextFormat: TypeAlias = Union[ - ResponseFormatTextParam, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObjectParam -] - - -class Text(TypedDict, total=False): - format: TextFormat - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - -class ToolChoiceToolChoiceTypes(TypedDict, total=False): - type: Required[ - Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"] - ] - """The type of hosted tool the model should use.
- - Learn more about [built-in tools](/docs/guides/tools). - - Allowed values are: - - - `file_search` - - `web_search_preview` - - `computer_use_preview` - """ - - -class ToolChoiceToolChoiceFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - type: Required[Literal["function"]] - """For function calling, the type is always `function`.""" - - -ToolChoice: TypeAlias = Union[ - Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction -] - -ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam] - - -class ToolFileSearchToolRankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default-2024-11-15"] - """The ranker to use for the file search.""" - - score_threshold: float - """ - The score threshold for the file search, a number between 0 and 1. Numbers - closer to 1 will attempt to return only the most relevant results, but may - return fewer results. - """ - - -class ToolFileSearchTool(TypedDict, total=False): - type: Required[Literal["file_search"]] - """The type of the file search tool. Always `file_search`.""" - - vector_store_ids: Required[List[str]] - """The IDs of the vector stores to search.""" - - filters: ToolFileSearchToolFilters - """A filter to apply based on file attributes.""" - - max_num_results: int - """The maximum number of results to return. - - This number should be between 1 and 50 inclusive. - """ - - ranking_options: ToolFileSearchToolRankingOptions - """Ranking options for search.""" - - -class ToolFunctionTool(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - parameters: Required[Dict[str, object]] - """A JSON schema object describing the parameters of the function.""" - - strict: Required[bool] - """Whether to enforce strict parameter validation. Default `true`.""" - - type: Required[Literal["function"]] - """The type of the function tool. Always `function`.""" - - description: Optional[str] - """A description of the function. - - Used by the model to determine whether or not to call the function. - """ - - -class ToolComputerTool(TypedDict, total=False): - display_height: Required[float] - """The height of the computer display.""" - - display_width: Required[float] - """The width of the computer display.""" - - environment: Required[Literal["mac", "windows", "ubuntu", "browser"]] - """The type of computer environment to control.""" - - type: Required[Literal["computer_use_preview"]] - """The type of the computer use tool. Always `computer_use_preview`.""" - - -class ToolWebSearchToolUserLocation(WebSearchLocationParam, total=False): - type: Required[Literal["approximate"]] - """The type of location approximation. Always `approximate`.""" - - -class ToolWebSearchTool(TypedDict, total=False): - type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] - """The type of the web search tool. One of: - - - `web_search_preview` - - `web_search_preview_2025_03_11` - """ - - search_context_size: WebSearchContextSize - """ - High level guidance for the amount of context window space to use for the - search. One of `low`, `medium`, or `high`. `medium` is the default. 
- """ - - user_location: Optional[ToolWebSearchToolUserLocation] - """Approximate location parameters for the search.""" - - -Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool] diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py b/src/digitalocean_genai_sdk/types/response_list_input_items_params.py deleted file mode 100644 index cba0c8b8..00000000 --- a/src/digitalocean_genai_sdk/types/response_list_input_items_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["ResponseListInputItemsParams"] - - -class ResponseListInputItemsParams(TypedDict, total=False): - after: str - """An item ID to list items after, used in pagination.""" - - before: str - """An item ID to list items before, used in pagination.""" - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """The order to return the input items in. Default is `asc`. - - - `asc`: Return the input items in ascending order. - - `desc`: Return the input items in descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py b/src/digitalocean_genai_sdk/types/response_list_input_items_response.py deleted file mode 100644 index 95f4555e..00000000 --- a/src/digitalocean_genai_sdk/types/response_list_input_items_response.py +++ /dev/null @@ -1,76 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union -from typing_extensions import Literal, Annotated, TypeAlias - -from .._utils import PropertyInfo -from .._models import BaseModel -from .input_message import InputMessage -from .output_message import OutputMessage -from .computer_tool_call import ComputerToolCall -from .function_tool_call import FunctionToolCall -from .web_search_tool_call import WebSearchToolCall -from .file_search_tool_call import FileSearchToolCall -from .computer_tool_call_output import ComputerToolCallOutput -from .function_tool_call_output import FunctionToolCallOutput - -__all__ = [ - "ResponseListInputItemsResponse", - "Data", - "DataMessage", - "DataComputerCallOutput", - "DataFunctionCall", - "DataFunctionCallOutput", -] - - -class DataMessage(InputMessage): - id: str - """The unique ID of the message input.""" - - -class DataComputerCallOutput(ComputerToolCallOutput): - id: str # type: ignore - """The unique ID of the computer call tool output.""" - - -class DataFunctionCall(FunctionToolCall): - id: str # type: ignore - """The unique ID of the function tool call.""" - - -class DataFunctionCallOutput(FunctionToolCallOutput): - id: str # type: ignore - """The unique ID of the function call tool output.""" - - -Data: TypeAlias = Annotated[ - Union[ - DataMessage, - OutputMessage, - FileSearchToolCall, - ComputerToolCall, - DataComputerCallOutput, - WebSearchToolCall, - DataFunctionCall, - DataFunctionCallOutput, - ], - PropertyInfo(discriminator="type"), -] - - -class ResponseListInputItemsResponse(BaseModel): - data: List[Data] - """A list of items used to generate this response.""" - - first_id: str - """The ID of the first item in the list.""" - - has_more: bool - """Whether there are more items available.""" - - last_id: str - """The ID of the last item in the list.""" - - object: 
Literal["list"] - """The type of object returned, must be `list`.""" diff --git a/src/digitalocean_genai_sdk/types/response_properties.py b/src/digitalocean_genai_sdk/types/response_properties.py deleted file mode 100644 index 84746be5..00000000 --- a/src/digitalocean_genai_sdk/types/response_properties.py +++ /dev/null @@ -1,362 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from pydantic import Field as FieldInfo - -from .._models import BaseModel -from .compound_filter import CompoundFilter -from .reasoning_effort import ReasoningEffort -from .comparison_filter import ComparisonFilter -from .chat.web_search_location import WebSearchLocation -from .chat.response_format_text import ResponseFormatText -from .chat.web_search_context_size import WebSearchContextSize -from .chat.response_format_json_object import ResponseFormatJsonObject - -__all__ = [ - "ResponseProperties", - "Reasoning", - "Text", - "TextFormat", - "TextFormatTextResponseFormatJsonSchema", - "ToolChoice", - "ToolChoiceToolChoiceTypes", - "ToolChoiceToolChoiceFunction", - "Tool", - "ToolFileSearchTool", - "ToolFileSearchToolFilters", - "ToolFileSearchToolRankingOptions", - "ToolFunctionTool", - "ToolComputerTool", - "ToolWebSearchTool", - "ToolWebSearchToolUserLocation", -] - - -class Reasoning(BaseModel): - effort: Optional[ReasoningEffort] = None - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - """ - - generate_summary: Optional[Literal["concise", "detailed"]] = None - """**computer_use_preview only** - - A summary of the reasoning performed by the model. This can be useful for - debugging and understanding the model's reasoning process. One of `concise` or - `detailed`. - """ - - -class TextFormatTextResponseFormatJsonSchema(BaseModel): - schema_: Dict[str, object] = FieldInfo(alias="schema") - """ - The schema for the response format, described as a JSON Schema object. Learn how - to build JSON schemas [here](https://json-schema.org/). - """ - - type: Literal["json_schema"] - """The type of response format being defined. Always `json_schema`.""" - - description: Optional[str] = None - """ - A description of what the response format is for, used by the model to determine - how to respond in the format. - """ - - name: Optional[str] = None - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - - strict: Optional[bool] = None - """ - Whether to enable strict schema adherence when generating the output. If set to - true, the model will always follow the exact schema defined in the `schema` - field. Only a subset of JSON Schema is supported when `strict` is `true`. To - learn more, read the - [Structured Outputs guide](/docs/guides/structured-outputs). - """ - - -TextFormat: TypeAlias = Union[ResponseFormatText, TextFormatTextResponseFormatJsonSchema, ResponseFormatJsonObject] - - -class Text(BaseModel): - format: Optional[TextFormat] = None - """An object specifying the format that the model must output. 
- - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - -class ToolChoiceToolChoiceTypes(BaseModel): - type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"] - """The type of hosted tool the model should use. - - Learn more about [built-in tools](/docs/guides/tools). - - Allowed values are: - - - `file_search` - - `web_search_preview` - - `computer_use_preview` - """ - - -class ToolChoiceToolChoiceFunction(BaseModel): - name: str - """The name of the function to call.""" - - type: Literal["function"] - """For function calling, the type is always `function`.""" - - -ToolChoice: TypeAlias = Union[ - Literal["none", "auto", "required"], ToolChoiceToolChoiceTypes, ToolChoiceToolChoiceFunction -] - -ToolFileSearchToolFilters: TypeAlias = Union[ComparisonFilter, CompoundFilter] - - -class ToolFileSearchToolRankingOptions(BaseModel): - ranker: Optional[Literal["auto", "default-2024-11-15"]] = None - """The ranker to use for the file search.""" - - score_threshold: Optional[float] = None - """ - The score threshold for the file search, a number between 0 and 1. Numbers - closer to 1 will attempt to return only the most relevant results, but may - return fewer results. - """ - - -class ToolFileSearchTool(BaseModel): - type: Literal["file_search"] - """The type of the file search tool. Always `file_search`.""" - - vector_store_ids: List[str] - """The IDs of the vector stores to search.""" - - filters: Optional[ToolFileSearchToolFilters] = None - """A filter to apply based on file attributes.""" - - max_num_results: Optional[int] = None - """The maximum number of results to return. - - This number should be between 1 and 50 inclusive. - """ - - ranking_options: Optional[ToolFileSearchToolRankingOptions] = None - """Ranking options for search.""" - - -class ToolFunctionTool(BaseModel): - name: str - """The name of the function to call.""" - - parameters: Dict[str, object] - """A JSON schema object describing the parameters of the function.""" - - strict: bool - """Whether to enforce strict parameter validation. Default `true`.""" - - type: Literal["function"] - """The type of the function tool. Always `function`.""" - - description: Optional[str] = None - """A description of the function. - - Used by the model to determine whether or not to call the function. - """ - - -class ToolComputerTool(BaseModel): - display_height: float - """The height of the computer display.""" - - display_width: float - """The width of the computer display.""" - - environment: Literal["mac", "windows", "ubuntu", "browser"] - """The type of computer environment to control.""" - - type: Literal["computer_use_preview"] - """The type of the computer use tool. Always `computer_use_preview`.""" - - -class ToolWebSearchToolUserLocation(WebSearchLocation): - type: Literal["approximate"] - """The type of location approximation.
Always `approximate`.""" - - -class ToolWebSearchTool(BaseModel): - type: Literal["web_search_preview", "web_search_preview_2025_03_11"] - """The type of the web search tool. One of: - - - `web_search_preview` - - `web_search_preview_2025_03_11` - """ - - search_context_size: Optional[WebSearchContextSize] = None - """ - High level guidance for the amount of context window space to use for the - search. One of `low`, `medium`, or `high`. `medium` is the default. - """ - - user_location: Optional[ToolWebSearchToolUserLocation] = None - """Approximate location parameters for the search.""" - - -Tool: TypeAlias = Union[ToolFileSearchTool, ToolFunctionTool, ToolComputerTool, ToolWebSearchTool] - - -class ResponseProperties(BaseModel): - instructions: Optional[str] = None - """ - Inserts a system (or developer) message as the first item in the model's - context. - - When using along with `previous_response_id`, the instructions from a previous - response will not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. - """ - - max_output_tokens: Optional[int] = None - """ - An upper bound for the number of tokens that can be generated for a response, - including visible output tokens and [reasoning tokens](/docs/guides/reasoning). - """ - - model: Union[ - Literal[ - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - "o1-pro", - "o1-pro-2025-03-19", - "computer-use-preview", - "computer-use-preview-2025-03-11", - ], - str, - None, - ] = None - """Model ID used to generate the response, like `gpt-4o` or `o1`. - - OpenAI offers a wide range of models with different capabilities, performance - characteristics, and price points. Refer to the [model guide](/docs/models) to - browse and compare available models. - """ - - previous_response_id: Optional[str] = None - """The unique ID of the previous response to the model. - - Use this to create multi-turn conversations. Learn more about - [conversation state](/docs/guides/conversation-state). - """ - - reasoning: Optional[Reasoning] = None - """**o-series models only** - - Configuration options for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). - """ - - text: Optional[Text] = None - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Structured Outputs](/docs/guides/structured-outputs) - """ - - tool_choice: Optional[ToolChoice] = None - """ - How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. - """ - - tools: Optional[List[Tool]] = None - """An array of tools the model may call while generating a response. - - You can specify which tool to use by setting the `tool_choice` parameter. - - The two categories of tools you can provide the model are: - - - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - capabilities, like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search). Learn more about - [built-in tools](/docs/guides/tools). - - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about - [function calling](/docs/guides/function-calling). - """ - - truncation: Optional[Literal["auto", "disabled"]] = None - """The truncation strategy to use for the model response. - - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size - for a model, the request will fail with a 400 error. - """
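For reference, a minimal sketch of a `ResponseCreateParams` payload that opts into Structured Outputs through `text.format`, mirroring the `TextFormatTextResponseFormatJsonSchema` shape above; the schema and prompt are illustrative:

from digitalocean_genai_sdk.types.response_create_params import ResponseCreateParams

# `type: "json_schema"` constrains the model to the supplied JSON Schema;
# every key mirrors the TypedDict definitions above.
params: ResponseCreateParams = {
    "model": "gpt-4o",
    "input": "Extract the city and country from: 'Berlin, Germany'.",
    "text": {
        "format": {
            "type": "json_schema",
            "name": "location",
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
            },
            "strict": True,  # enforce exact schema adherence
        }
    },
}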
Learn more: - - - [Text inputs and outputs](/docs/guides/text) - - [Structured Outputs](/docs/guides/structured-outputs) - """ - - tool_choice: Optional[ToolChoice] = None - """ - How the model should select which tool (or tools) to use when generating a - response. See the `tools` parameter to see how to specify which tools the model - can call. - """ - - tools: Optional[List[Tool]] = None - """An array of tools the model may call while generating a response. - - You can specify which tool to use by setting the `tool_choice` parameter. - - The two categories of tools you can provide the model are: - - - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - capabilities, like [web search](/docs/guides/tools-web-search) or - [file search](/docs/guides/tools-file-search). Learn more about - [built-in tools](/docs/guides/tools). - - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about - [function calling](/docs/guides/function-calling). - """ - - truncation: Optional[Literal["auto", "disabled"]] = None - """The truncation strategy to use for the model response. - - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size - for a model, the request will fail with a 400 error. - """ diff --git a/src/digitalocean_genai_sdk/types/response_retrieve_params.py b/src/digitalocean_genai_sdk/types/response_retrieve_params.py deleted file mode 100644 index b85dbba1..00000000 --- a/src/digitalocean_genai_sdk/types/response_retrieve_params.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -from .includable import Includable - -__all__ = ["ResponseRetrieveParams"] - - -class ResponseRetrieveParams(TypedDict, total=False): - include: List[Includable] - """Specify additional output data to include in the response. - - Currently supported values are: - - - `file_search_call.results`: Include the search results of the file search - tool call. - - `message.input_image.image_url`: Include image URLs from the input message. - - `computer_call_output.output.image_url`: Include image URLs from the computer - call output. - """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy.py deleted file mode 100644 index a4c0ce82..00000000 --- a/src/digitalocean_genai_sdk/types/static_chunking_strategy.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .._models import BaseModel - -__all__ = ["StaticChunkingStrategy"] - - -class StaticChunkingStrategy(BaseModel): - chunk_overlap_tokens: int - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: int - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`.
- """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py deleted file mode 100644 index c3535404..00000000 --- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_param.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["StaticChunkingStrategyParam"] - - -class StaticChunkingStrategyParam(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ diff --git a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py deleted file mode 100644 index 51de3b75..00000000 --- a/src/digitalocean_genai_sdk/types/static_chunking_strategy_request_param.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -from .static_chunking_strategy_param import StaticChunkingStrategyParam - -__all__ = ["StaticChunkingStrategyRequestParam"] - - -class StaticChunkingStrategyRequestParam(TypedDict, total=False): - static: Required[StaticChunkingStrategyParam] - - type: Required[Literal["static"]] - """Always `static`.""" diff --git a/src/digitalocean_genai_sdk/types/thread_create_params.py b/src/digitalocean_genai_sdk/types/thread_create_params.py deleted file mode 100644 index 7ee77039..00000000 --- a/src/digitalocean_genai_sdk/types/thread_create_params.py +++ /dev/null @@ -1,130 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .threads.create_message_request_param import CreateMessageRequestParam - -__all__ = [ - "ThreadCreateParams", - "ToolResources", - "ToolResourcesCodeInterpreter", - "ToolResourcesFileSearch", - "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic", -] - - -class ThreadCreateParams(TypedDict, total=False): - messages: Iterable[CreateMessageRequestParam] - """A list of [messages](/docs/api-reference/messages) to start the thread with.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
- """ - - tool_resources: Optional[ToolResources] - """ - A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategyStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAutoChunkingStrategy, - ToolResourcesFileSearchVectorStoreChunkingStrategyStaticChunkingStrategy, -] - - -class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. - """ - - file_ids: List[str] - """A list of [file](/docs/api-reference/files) IDs to add to the vector store. - - There can be a maximum of 10000 files in a vector store. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - The [vector store](/docs/api-reference/vector-stores/object) attached to this - thread. There can be a maximum of 1 vector store attached to the thread. - """ - - vector_stores: Iterable[ToolResourcesFileSearchVectorStore] - """ - A helper to create a [vector store](/docs/api-reference/vector-stores/object) - with file_ids and attach it to this thread. There can be a maximum of 1 vector - store attached to the thread. 
- """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch diff --git a/src/digitalocean_genai_sdk/types/thread_delete_response.py b/src/digitalocean_genai_sdk/types/thread_delete_response.py deleted file mode 100644 index 74f09d84..00000000 --- a/src/digitalocean_genai_sdk/types/thread_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ThreadDeleteResponse"] - - -class ThreadDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["thread.deleted"] diff --git a/src/digitalocean_genai_sdk/types/thread_object.py b/src/digitalocean_genai_sdk/types/thread_object.py deleted file mode 100644 index 7924dd8f..00000000 --- a/src/digitalocean_genai_sdk/types/thread_object.py +++ /dev/null @@ -1,60 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ThreadObject", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] - - -class ToolResourcesCodeInterpreter(BaseModel): - file_ids: Optional[List[str]] = None - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearch(BaseModel): - vector_store_ids: Optional[List[str]] = None - """ - The [vector store](/docs/api-reference/vector-stores/object) attached to this - thread. There can be a maximum of 1 vector store attached to the thread. - """ - - -class ToolResources(BaseModel): - code_interpreter: Optional[ToolResourcesCodeInterpreter] = None - - file_search: Optional[ToolResourcesFileSearch] = None - - -class ThreadObject(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the thread was created.""" - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - object: Literal["thread"] - """The object type, which is always `thread`.""" - - tool_resources: Optional[ToolResources] = None - """ - A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ diff --git a/src/digitalocean_genai_sdk/types/thread_update_params.py b/src/digitalocean_genai_sdk/types/thread_update_params.py deleted file mode 100644 index d952d35b..00000000 --- a/src/digitalocean_genai_sdk/types/thread_update_params.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Optional -from typing_extensions import TypedDict - -__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] - - -class ThreadUpdateParams(TypedDict, total=False): - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - tool_resources: Optional[ToolResources] - """ - A set of resources that are made available to the assistant's tools in this - thread. The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - The [vector store](/docs/api-reference/vector-stores/object) attached to this - thread. There can be a maximum of 1 vector store attached to the thread. - """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch diff --git a/src/digitalocean_genai_sdk/types/threads/__init__.py b/src/digitalocean_genai_sdk/types/threads/__init__.py index 9af8d93a..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/threads/__init__.py +++ b/src/digitalocean_genai_sdk/types/threads/__init__.py @@ -1,36 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations - -from .run_object import RunObject as RunObject -from .message_object import MessageObject as MessageObject -from .run_list_params import RunListParams as RunListParams -from .run_create_params import RunCreateParams as RunCreateParams -from .run_list_response import RunListResponse as RunListResponse -from .run_update_params import RunUpdateParams as RunUpdateParams -from .truncation_object import TruncationObject as TruncationObject -from .message_list_params import MessageListParams as MessageListParams -from .message_create_params import MessageCreateParams as MessageCreateParams -from .message_list_response import MessageListResponse as MessageListResponse -from .message_update_params import MessageUpdateParams as MessageUpdateParams -from .run_create_run_params import RunCreateRunParams as RunCreateRunParams -from .message_delete_response import MessageDeleteResponse as MessageDeleteResponse -from .truncation_object_param import TruncationObjectParam as TruncationObjectParam -from .create_message_request_param import CreateMessageRequestParam as CreateMessageRequestParam -from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams -from .message_content_image_url_object import MessageContentImageURLObject as MessageContentImageURLObject -from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption as AssistantsAPIToolChoiceOption -from .message_content_image_file_object import MessageContentImageFileObject as MessageContentImageFileObject -from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly as AssistantToolsFileSearchTypeOnly -from .message_content_image_url_object_param import ( - MessageContentImageURLObjectParam as MessageContentImageURLObjectParam, -) -from .assistants_api_tool_choice_option_param import ( - AssistantsAPIToolChoiceOptionParam as AssistantsAPIToolChoiceOptionParam, -) -from .message_content_image_file_object_param import ( - MessageContentImageFileObjectParam as MessageContentImageFileObjectParam, -) -from .assistant_tools_file_search_type_only_param import ( - AssistantToolsFileSearchTypeOnlyParam as AssistantToolsFileSearchTypeOnlyParam, -) diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py deleted file mode 100644 index 6708bff3..00000000 --- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["AssistantToolsFileSearchTypeOnly"] - - -class AssistantToolsFileSearchTypeOnly(BaseModel): - type: Literal["file_search"] - """The type of tool being defined: `file_search`""" diff --git a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py b/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py deleted file mode 100644 index f0a48b2c..00000000 --- a/src/digitalocean_genai_sdk/types/threads/assistant_tools_file_search_type_only_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["AssistantToolsFileSearchTypeOnlyParam"] - - -class AssistantToolsFileSearchTypeOnlyParam(TypedDict, total=False): - type: Required[Literal["file_search"]] - """The type of tool being defined: `file_search`""" diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py deleted file mode 100644 index af7be1f7..00000000 --- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, TypeAlias - -from ..._models import BaseModel - -__all__ = ["AssistantsAPIToolChoiceOption", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"] - - -class AssistantsNamedToolChoiceFunction(BaseModel): - name: str - """The name of the function to call.""" - - -class AssistantsNamedToolChoice(BaseModel): - type: Literal["function", "code_interpreter", "file_search"] - """The type of the tool. If type is `function`, the function name must be set""" - - function: Optional[AssistantsNamedToolChoiceFunction] = None - - -AssistantsAPIToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice] diff --git a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py b/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py deleted file mode 100644 index 10f98f89..00000000 --- a/src/digitalocean_genai_sdk/types/threads/assistants_api_tool_choice_option_param.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["AssistantsAPIToolChoiceOptionParam", "AssistantsNamedToolChoice", "AssistantsNamedToolChoiceFunction"] - - -class AssistantsNamedToolChoiceFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - -class AssistantsNamedToolChoice(TypedDict, total=False): - type: Required[Literal["function", "code_interpreter", "file_search"]] - """The type of the tool. If type is `function`, the function name must be set""" - - function: AssistantsNamedToolChoiceFunction - - -AssistantsAPIToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantsNamedToolChoice] diff --git a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py b/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py deleted file mode 100644 index 64c2a781..00000000 --- a/src/digitalocean_genai_sdk/types/threads/create_message_request_param.py +++ /dev/null @@ -1,71 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
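# A minimal sketch of the AssistantsAPIToolChoiceOptionParam union defined above:
# either one of the literal strings "none" / "auto" / "required", or a named-tool
# dict. The function name is a made-up example.
auto_choice = "auto"
named_choice = {
    "type": "function",  # one of: "function", "code_interpreter", "file_search"
    "function": {"name": "lookup_weather"},  # required when type is "function"
}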
- -from __future__ import annotations - -from typing import Dict, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..assistant_tools_code_param import AssistantToolsCodeParam -from .message_content_image_url_object_param import MessageContentImageURLObjectParam -from .message_content_image_file_object_param import MessageContentImageFileObjectParam -from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam - -__all__ = [ - "CreateMessageRequestParam", - "ContentArrayOfContentPart", - "ContentArrayOfContentPartMessageRequestContentTextObject", - "Attachment", - "AttachmentTool", -] - - -class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False): - text: Required[str] - """Text content to be sent to the model""" - - type: Required[Literal["text"]] - """Always `text`.""" - - -ContentArrayOfContentPart: TypeAlias = Union[ - MessageContentImageFileObjectParam, - MessageContentImageURLObjectParam, - ContentArrayOfContentPartMessageRequestContentTextObject, -] - -AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam] - - -class Attachment(TypedDict, total=False): - file_id: str - """The ID of the file to attach to the message.""" - - tools: Iterable[AttachmentTool] - """The tools to add this file to.""" - - -class CreateMessageRequestParam(TypedDict, total=False): - content: Required[Union[str, Iterable[ContentArrayOfContentPart]]] - """The text contents of the message.""" - - role: Required[Literal["user", "assistant"]] - """The role of the entity that is creating the message. Allowed values include: - - - `user`: Indicates the message is sent by an actual user and should be used in - most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this - value to insert messages from the assistant into the conversation. - """ - - attachments: Optional[Iterable[Attachment]] - """A list of files attached to the message, and the tools they should be added to.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py deleted file mode 100644 index b22ef410..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["MessageContentImageFileObject", "ImageFile"] - - -class ImageFile(BaseModel): - file_id: str - """The [File](/docs/api-reference/files) ID of the image in the message content. - - Set `purpose="vision"` when uploading the File if you need to later display the - file content. - """ - - detail: Optional[Literal["auto", "low", "high"]] = None - """Specifies the detail level of the image if specified by the user. - - `low` uses fewer tokens, you can opt in to high resolution using `high`. 
- """ - - -class MessageContentImageFileObject(BaseModel): - image_file: ImageFile - - type: Literal["image_file"] - """Always `image_file`.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py deleted file mode 100644 index 734dcf15..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_content_image_file_object_param.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["MessageContentImageFileObjectParam", "ImageFile"] - - -class ImageFile(TypedDict, total=False): - file_id: Required[str] - """The [File](/docs/api-reference/files) ID of the image in the message content. - - Set `purpose="vision"` when uploading the File if you need to later display the - file content. - """ - - detail: Literal["auto", "low", "high"] - """Specifies the detail level of the image if specified by the user. - - `low` uses fewer tokens, you can opt in to high resolution using `high`. - """ - - -class MessageContentImageFileObjectParam(TypedDict, total=False): - image_file: Required[ImageFile] - - type: Required[Literal["image_file"]] - """Always `image_file`.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py deleted file mode 100644 index 9a7f980b..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["MessageContentImageURLObject", "ImageURL"] - - -class ImageURL(BaseModel): - url: str - """ - The external URL of the image, must be a supported image types: jpeg, jpg, png, - gif, webp. - """ - - detail: Optional[Literal["auto", "low", "high"]] = None - """Specifies the detail level of the image. - - `low` uses fewer tokens, you can opt in to high resolution using `high`. Default - value is `auto` - """ - - -class MessageContentImageURLObject(BaseModel): - image_url: ImageURL - - type: Literal["image_url"] - """The type of the content part.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py b/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py deleted file mode 100644 index f3f777c4..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_content_image_url_object_param.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["MessageContentImageURLObjectParam", "ImageURL"] - - -class ImageURL(TypedDict, total=False): - url: Required[str] - """ - The external URL of the image, must be a supported image types: jpeg, jpg, png, - gif, webp. - """ - - detail: Literal["auto", "low", "high"] - """Specifies the detail level of the image. - - `low` uses fewer tokens, you can opt in to high resolution using `high`. 
Default - value is `auto` - """ - - -class MessageContentImageURLObjectParam(TypedDict, total=False): - image_url: Required[ImageURL] - - type: Required[Literal["image_url"]] - """The type of the content part.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_create_params.py b/src/digitalocean_genai_sdk/types/threads/message_create_params.py deleted file mode 100644 index d9a4cd40..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_create_params.py +++ /dev/null @@ -1,71 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..assistant_tools_code_param import AssistantToolsCodeParam -from .message_content_image_url_object_param import MessageContentImageURLObjectParam -from .message_content_image_file_object_param import MessageContentImageFileObjectParam -from .assistant_tools_file_search_type_only_param import AssistantToolsFileSearchTypeOnlyParam - -__all__ = [ - "MessageCreateParams", - "ContentArrayOfContentPart", - "ContentArrayOfContentPartMessageRequestContentTextObject", - "Attachment", - "AttachmentTool", -] - - -class MessageCreateParams(TypedDict, total=False): - content: Required[Union[str, Iterable[ContentArrayOfContentPart]]] - """The text contents of the message.""" - - role: Required[Literal["user", "assistant"]] - """The role of the entity that is creating the message. Allowed values include: - - - `user`: Indicates the message is sent by an actual user and should be used in - most cases to represent user-generated messages. - - `assistant`: Indicates the message is generated by the assistant. Use this - value to insert messages from the assistant into the conversation. - """ - - attachments: Optional[Iterable[Attachment]] - """A list of files attached to the message, and the tools they should be added to.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - -class ContentArrayOfContentPartMessageRequestContentTextObject(TypedDict, total=False): - text: Required[str] - """Text content to be sent to the model""" - - type: Required[Literal["text"]] - """Always `text`.""" - - -ContentArrayOfContentPart: TypeAlias = Union[ - MessageContentImageFileObjectParam, - MessageContentImageURLObjectParam, - ContentArrayOfContentPartMessageRequestContentTextObject, -] - -AttachmentTool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchTypeOnlyParam] - - -class Attachment(TypedDict, total=False): - file_id: str - """The ID of the file to attach to the message.""" - - tools: Iterable[AttachmentTool] - """The tools to add this file to.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py b/src/digitalocean_genai_sdk/types/threads/message_delete_response.py deleted file mode 100644 index c86408dc..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
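# A minimal sketch of a message-creation payload matching the
# CreateMessageRequestParam / MessageCreateParams TypedDicts in this section:
# `content` may be a plain string or a list of typed parts, and each attachment
# pairs a file ID with the tools it should be added to. The file IDs are made-up
# examples.
message_params = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What does this chart show?"},
        {"type": "image_file", "image_file": {"file_id": "file-abc123", "detail": "high"}},
    ],
    "attachments": [
        {"file_id": "file-def456", "tools": [{"type": "file_search"}]},
    ],
}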
- -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["MessageDeleteResponse"] - - -class MessageDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["thread.message.deleted"] diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_params.py b/src/digitalocean_genai_sdk/types/threads/message_list_params.py deleted file mode 100644 index a7c22a66..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_list_params.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["MessageListParams"] - - -class MessageListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ - - run_id: str - """Filter messages by the run ID that generated them.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_list_response.py b/src/digitalocean_genai_sdk/types/threads/message_list_response.py deleted file mode 100644 index f710da32..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel -from .message_object import MessageObject - -__all__ = ["MessageListResponse"] - - -class MessageListResponse(BaseModel): - data: List[MessageObject] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/threads/message_object.py b/src/digitalocean_genai_sdk/types/threads/message_object.py deleted file mode 100644 index b2cb3711..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_object.py +++ /dev/null @@ -1,179 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
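# A sketch of the cursor-pagination contract described by MessageListParams and
# MessageListResponse above: pass the previous page's `last_id` as `after` until
# `has_more` is False. `fetch_page` is an assumed stand-in for whatever performs
# the HTTP call, not a method of this SDK.
def iter_all_messages(fetch_page):
    params = {"limit": 100, "order": "asc"}
    while True:
        page = fetch_page(**params)  # returns a MessageListResponse-shaped object
        yield from page.data
        if not page.has_more:
            break
        params["after"] = page.last_id  # cursor for the next page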
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from ..._models import BaseModel -from ..assistant_tools_code import AssistantToolsCode -from .message_content_image_url_object import MessageContentImageURLObject -from .message_content_image_file_object import MessageContentImageFileObject -from .assistant_tools_file_search_type_only import AssistantToolsFileSearchTypeOnly - -__all__ = [ - "MessageObject", - "Attachment", - "AttachmentTool", - "Content", - "ContentMessageContentTextObject", - "ContentMessageContentTextObjectText", - "ContentMessageContentTextObjectTextAnnotation", - "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject", - "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation", - "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject", - "ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath", - "ContentMessageContentRefusalObject", - "IncompleteDetails", -] - -AttachmentTool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearchTypeOnly] - - -class Attachment(BaseModel): - file_id: Optional[str] = None - """The ID of the file to attach to the message.""" - - tools: Optional[List[AttachmentTool]] = None - """The tools to add this file to.""" - - -class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation( - BaseModel -): - file_id: str - """The ID of the specific File the citation is from.""" - - -class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject(BaseModel): - end_index: int - - file_citation: ( - ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObjectFileCitation - ) - - start_index: int - - text: str - """The text in the message content that needs to be replaced.""" - - type: Literal["file_citation"] - """Always `file_citation`.""" - - -class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath(BaseModel): - file_id: str - """The ID of the file that was generated.""" - - -class ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject(BaseModel): - end_index: int - - file_path: ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObjectFilePath - - start_index: int - - text: str - """The text in the message content that needs to be replaced.""" - - type: Literal["file_path"] - """Always `file_path`.""" - - -ContentMessageContentTextObjectTextAnnotation: TypeAlias = Union[ - ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFileCitationObject, - ContentMessageContentTextObjectTextAnnotationMessageContentTextAnnotationsFilePathObject, -] - - -class ContentMessageContentTextObjectText(BaseModel): - annotations: List[ContentMessageContentTextObjectTextAnnotation] - - value: str - """The data that makes up the text.""" - - -class ContentMessageContentTextObject(BaseModel): - text: ContentMessageContentTextObjectText - - type: Literal["text"] - """Always `text`.""" - - -class ContentMessageContentRefusalObject(BaseModel): - refusal: str - - type: Literal["refusal"] - """Always `refusal`.""" - - -Content: TypeAlias = Union[ - MessageContentImageFileObject, - MessageContentImageURLObject, - ContentMessageContentTextObject, - ContentMessageContentRefusalObject, -] - - -class 
IncompleteDetails(BaseModel): - reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] - """The reason the message is incomplete.""" - - -class MessageObject(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - assistant_id: Optional[str] = None - """ - If applicable, the ID of the [assistant](/docs/api-reference/assistants) that - authored this message. - """ - - attachments: Optional[List[Attachment]] = None - """A list of files attached to the message, and the tools they were added to.""" - - completed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the message was completed.""" - - content: List[Content] - """The content of the message in an array of text and/or images.""" - - created_at: int - """The Unix timestamp (in seconds) for when the message was created.""" - - incomplete_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the message was marked as incomplete.""" - - incomplete_details: Optional[IncompleteDetails] = None - """On an incomplete message, details about why the message is incomplete.""" - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - object: Literal["thread.message"] - """The object type, which is always `thread.message`.""" - - role: Literal["user", "assistant"] - """The entity that produced the message. One of `user` or `assistant`.""" - - run_id: Optional[str] = None - """ - The ID of the [run](/docs/api-reference/runs) associated with the creation of - this message. Value is `null` when messages are created manually using the - create message or create thread endpoints. - """ - - status: Literal["in_progress", "incomplete", "completed"] - """ - The status of the message, which can be either `in_progress`, `incomplete`, or - `completed`. - """ - - thread_id: str - """The [thread](/docs/api-reference/threads) ID that this message belongs to.""" diff --git a/src/digitalocean_genai_sdk/types/threads/message_update_params.py b/src/digitalocean_genai_sdk/types/threads/message_update_params.py deleted file mode 100644 index a2e25260..00000000 --- a/src/digitalocean_genai_sdk/types/threads/message_update_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["MessageUpdateParams"] - - -class MessageUpdateParams(TypedDict, total=False): - thread_id: Required[str] - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters.
- """ diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_params.py deleted file mode 100644 index 43d0611a..00000000 --- a/src/digitalocean_genai_sdk/types/threads/run_create_params.py +++ /dev/null @@ -1,215 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .truncation_object_param import TruncationObjectParam -from ..assistant_tools_code_param import AssistantToolsCodeParam -from ..create_thread_request_param import CreateThreadRequestParam -from ..assistant_tools_function_param import AssistantToolsFunctionParam -from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam -from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam -from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam - -__all__ = ["RunCreateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "Tool"] - - -class RunCreateParams(TypedDict, total=False): - assistant_id: Required[str] - """ - The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - """ - - instructions: Optional[str] - """Override the default system message of the assistant. - - This is useful for modifying the behavior on a per-run basis. - """ - - max_completion_tokens: Optional[int] - """ - The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - """ - - max_prompt_tokens: Optional[int] - """The maximum number of prompt tokens that may be used over the course of the run. - - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - model: Union[ - str, - Literal[ - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4.5-preview", - "gpt-4.5-preview-2025-02-27", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - ], - None, - ] - """The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. - - If a value is provided here, it will override the model associated with the - assistant. 
If not, the model associated with the assistant will be used. - """ - - parallel_tool_calls: bool - """ - Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - """ - - response_format: Optional[AssistantsAPIResponseFormatOptionParam] - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - """ - - stream: Optional[bool] - """ - If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - """ - - thread: CreateThreadRequestParam - """Options to create a new thread. - - If no thread is provided when running a request, an empty thread will be - created. - """ - - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] - """ - Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - """ - - tool_resources: Optional[ToolResources] - """A set of resources that are used by the assistant's tools. - - The resources are specific to the type of tool. For example, the - `code_interpreter` tool requires a list of file IDs, while the `file_search` - tool requires a list of vector store IDs. - """ - - tools: Optional[Iterable[Tool]] - """Override the tools the assistant can use for this run. - - This is useful for modifying the behavior on a per-run basis. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
- """ - - truncation_strategy: Optional[TruncationObjectParam] - """Controls for how a thread will be truncated prior to the run. - - Use this to control the intial context window of the run. - """ - - -class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] - """ - A list of [file](/docs/api-reference/files) IDs made available to the - `code_interpreter` tool. There can be a maximum of 20 files associated with the - tool. - """ - - -class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] - """ - The ID of the [vector store](/docs/api-reference/vector-stores/object) attached - to this assistant. There can be a maximum of 1 vector store attached to the - assistant. - """ - - -class ToolResources(TypedDict, total=False): - code_interpreter: ToolResourcesCodeInterpreter - - file_search: ToolResourcesFileSearch - - -Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py b/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py deleted file mode 100644 index 694c7eea..00000000 --- a/src/digitalocean_genai_sdk/types/threads/run_create_run_params.py +++ /dev/null @@ -1,178 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..reasoning_effort import ReasoningEffort -from .truncation_object_param import TruncationObjectParam -from ..assistant_supported_models import AssistantSupportedModels -from ..assistant_tools_code_param import AssistantToolsCodeParam -from .create_message_request_param import CreateMessageRequestParam -from ..assistant_tools_function_param import AssistantToolsFunctionParam -from ..assistant_tools_file_search_param import AssistantToolsFileSearchParam -from .assistants_api_tool_choice_option_param import AssistantsAPIToolChoiceOptionParam -from ..assistants_api_response_format_option_param import AssistantsAPIResponseFormatOptionParam - -__all__ = ["RunCreateRunParams", "Tool"] - - -class RunCreateRunParams(TypedDict, total=False): - assistant_id: Required[str] - """ - The ID of the [assistant](/docs/api-reference/assistants) to use to execute this - run. - """ - - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] - """A list of additional fields to include in the response. - - Currently the only supported value is - `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ - - additional_instructions: Optional[str] - """Appends additional instructions at the end of the instructions for the run. - - This is useful for modifying the behavior on a per-run basis without overriding - other instructions. - """ - - additional_messages: Optional[Iterable[CreateMessageRequestParam]] - """Adds additional messages to the thread before creating the run.""" - - instructions: Optional[str] - """ - Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of - the assistant. This is useful for modifying the behavior on a per-run basis. 
- """ - - max_completion_tokens: Optional[int] - """ - The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. If the run exceeds the number of - completion tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - """ - - max_prompt_tokens: Optional[int] - """The maximum number of prompt tokens that may be used over the course of the run. - - The run will make a best effort to use only the number of prompt tokens - specified, across multiple turns of the run. If the run exceeds the number of - prompt tokens specified, the run will end with status `incomplete`. See - `incomplete_details` for more info. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - model: Union[str, AssistantSupportedModels, None] - """The ID of the [Model](/docs/api-reference/models) to be used to execute this - run. - - If a value is provided here, it will override the model associated with the - assistant. If not, the model associated with the assistant will be used. - """ - - parallel_tool_calls: bool - """ - Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - """ - - reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - - Constrains effort on reasoning for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. - """ - - response_format: Optional[AssistantsAPIResponseFormatOptionParam] - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Learn more - in the [Structured Outputs guide](/docs/guides/structured-outputs). - - Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. Without this, the model may - generate an unending stream of whitespace until the generation reaches the token - limit, resulting in a long-running and seemingly "stuck" request. Also note that - the message content may be partially cut off if `finish_reason="length"`, which - indicates the generation exceeded `max_tokens` or the conversation exceeded the - max context length. - """ - - stream: Optional[bool] - """ - If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - """ - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. 
- - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - """ - - tool_choice: Optional[AssistantsAPIToolChoiceOptionParam] - """ - Controls which (if any) tool is called by the model. `none` means the model will - not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling one or more - tools. `required` means the model must call one or more tools before responding - to the user. Specifying a particular tool like `{"type": "file_search"}` or - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - """ - - tools: Optional[Iterable[Tool]] - """Override the tools the assistant can use for this run. - - This is useful for modifying the behavior on a per-run basis. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. - """ - - truncation_strategy: Optional[TruncationObjectParam] - """Controls for how a thread will be truncated prior to the run. - - Use this to control the initial context window of the run. - """ - - -Tool: TypeAlias = Union[AssistantToolsCodeParam, AssistantToolsFileSearchParam, AssistantToolsFunctionParam] diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_params.py b/src/digitalocean_genai_sdk/types/threads/run_list_params.py deleted file mode 100644 index fbea54f6..00000000 --- a/src/digitalocean_genai_sdk/types/threads/run_list_params.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["RunListParams"] - - -class RunListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/run_list_response.py b/src/digitalocean_genai_sdk/types/threads/run_list_response.py deleted file mode 100644 index 899bd0f9..00000000 --- a/src/digitalocean_genai_sdk/types/threads/run_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
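# A sketch of a RunCreateRunParams-style payload using options documented above:
# a forced file-search tool choice and Structured Outputs via `json_schema`. The
# assistant ID and the inner schema keys ("name", "schema") are assumptions for
# illustration, not confirmed by this file.
run_params = {
    "assistant_id": "asst_abc123",
    "tool_choice": {"type": "file_search"},
    "response_format": {
        "type": "json_schema",
        "json_schema": {
            "name": "weather_report",
            "schema": {
                "type": "object",
                "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
                "required": ["city", "temp_c"],
            },
        },
    },
}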
-
-from typing import List
-
-from ..._models import BaseModel
-from .run_object import RunObject
-
-__all__ = ["RunListResponse"]
-
-
-class RunListResponse(BaseModel):
-    data: List[RunObject]
-
-    first_id: str
-
-    has_more: bool
-
-    last_id: str
-
-    object: str
diff --git a/src/digitalocean_genai_sdk/types/threads/run_object.py b/src/digitalocean_genai_sdk/types/threads/run_object.py
deleted file mode 100644
index fa89f4b4..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_object.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from .truncation_object import TruncationObject
-from ..assistant_tools_code import AssistantToolsCode
-from ..assistant_tools_function import AssistantToolsFunction
-from ..assistant_tools_file_search import AssistantToolsFileSearch
-from .assistants_api_tool_choice_option import AssistantsAPIToolChoiceOption
-from ..assistants_api_response_format_option import AssistantsAPIResponseFormatOption
-
-__all__ = [
-    "RunObject",
-    "IncompleteDetails",
-    "LastError",
-    "RequiredAction",
-    "RequiredActionSubmitToolOutputs",
-    "RequiredActionSubmitToolOutputsToolCall",
-    "RequiredActionSubmitToolOutputsToolCallFunction",
-    "Tool",
-    "Usage",
-]
-
-
-class IncompleteDetails(BaseModel):
-    reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None
-    """The reason why the run is incomplete.
-
-    This will point to which specific token limit was reached over the course of the
-    run.
-    """
-
-
-class LastError(BaseModel):
-    code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
-    """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
-
-    message: str
-    """A human-readable description of the error."""
-
-
-class RequiredActionSubmitToolOutputsToolCallFunction(BaseModel):
-    arguments: str
-    """The arguments that the model expects you to pass to the function."""
-
-    name: str
-    """The name of the function."""
-
-
-class RequiredActionSubmitToolOutputsToolCall(BaseModel):
-    id: str
-    """The ID of the tool call.
-
-    This ID must be referenced when you submit the tool outputs using the
-    [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs)
-    endpoint.
-    """
-
-    function: RequiredActionSubmitToolOutputsToolCallFunction
-    """The function definition."""
-
-    type: Literal["function"]
-    """The type of tool call the output is required for.
-
-    For now, this is always `function`.
- """ - - -class RequiredActionSubmitToolOutputs(BaseModel): - tool_calls: List[RequiredActionSubmitToolOutputsToolCall] - """A list of the relevant tool calls.""" - - -class RequiredAction(BaseModel): - submit_tool_outputs: RequiredActionSubmitToolOutputs - """Details on the tool outputs needed for this run to continue.""" - - type: Literal["submit_tool_outputs"] - """For now, this is always `submit_tool_outputs`.""" - - -Tool: TypeAlias = Union[AssistantToolsCode, AssistantToolsFileSearch, AssistantToolsFunction] - - -class Usage(BaseModel): - completion_tokens: int - """Number of completion tokens used over the course of the run.""" - - prompt_tokens: int - """Number of prompt tokens used over the course of the run.""" - - total_tokens: int - """Total number of tokens used (prompt + completion).""" - - -class RunObject(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - assistant_id: str - """ - The ID of the [assistant](/docs/api-reference/assistants) used for execution of - this run. - """ - - cancelled_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run was cancelled.""" - - completed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run was completed.""" - - created_at: int - """The Unix timestamp (in seconds) for when the run was created.""" - - expires_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run will expire.""" - - failed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run failed.""" - - incomplete_details: Optional[IncompleteDetails] = None - """Details on why the run is incomplete. - - Will be `null` if the run is not incomplete. - """ - - instructions: str - """ - The instructions that the [assistant](/docs/api-reference/assistants) used for - this run. - """ - - last_error: Optional[LastError] = None - """The last error associated with this run. Will be `null` if there are no errors.""" - - max_completion_tokens: Optional[int] = None - """ - The maximum number of completion tokens specified to have been used over the - course of the run. - """ - - max_prompt_tokens: Optional[int] = None - """ - The maximum number of prompt tokens specified to have been used over the course - of the run. - """ - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - model: str - """ - The model that the [assistant](/docs/api-reference/assistants) used for this - run. - """ - - object: Literal["thread.run"] - """The object type, which is always `thread.run`.""" - - parallel_tool_calls: bool - """ - Whether to enable - [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) - during tool use. - """ - - required_action: Optional[RequiredAction] = None - """Details on the action required to continue the run. - - Will be `null` if no action is required. - """ - - response_format: Optional[AssistantsAPIResponseFormatOption] = None - """Specifies the format that the model must output. - - Compatible with [GPT-4o](/docs/models#gpt-4o), - [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models - since `gpt-3.5-turbo-1106`. 
-
-    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-    Outputs which ensures the model will match your supplied JSON schema. Learn more
-    in the [Structured Outputs guide](/docs/guides/structured-outputs).
-
-    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-    message the model generates is valid JSON.
-
-    **Important:** when using JSON mode, you **must** also instruct the model to
-    produce JSON yourself via a system or user message. Without this, the model may
-    generate an unending stream of whitespace until the generation reaches the token
-    limit, resulting in a long-running and seemingly "stuck" request. Also note that
-    the message content may be partially cut off if `finish_reason="length"`, which
-    indicates the generation exceeded `max_tokens` or the conversation exceeded the
-    max context length.
-    """
-
-    started_at: Optional[int] = None
-    """The Unix timestamp (in seconds) for when the run was started."""
-
-    status: Literal[
-        "queued",
-        "in_progress",
-        "requires_action",
-        "cancelling",
-        "cancelled",
-        "failed",
-        "completed",
-        "incomplete",
-        "expired",
-    ]
-    """
-    The status of the run, which can be either `queued`, `in_progress`,
-    `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
-    `incomplete`, or `expired`.
-    """
-
-    thread_id: str
-    """
-    The ID of the [thread](/docs/api-reference/threads) that was executed on as a
-    part of this run.
-    """
-
-    tool_choice: Optional[AssistantsAPIToolChoiceOption] = None
-    """
-    Controls which (if any) tool is called by the model. `none` means the model will
-    not call any tools and instead generates a message. `auto` is the default value
-    and means the model can pick between generating a message or calling one or more
-    tools. `required` means the model must call one or more tools before responding
-    to the user. Specifying a particular tool like `{"type": "file_search"}` or
-    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-    call that tool.
-    """
-
-    tools: List[Tool]
-    """
-    The list of tools that the [assistant](/docs/api-reference/assistants) used for
-    this run.
-    """
-
-    truncation_strategy: Optional[TruncationObject] = None
-    """Controls for how a thread will be truncated prior to the run.
-
-    Use this to control the initial context window of the run.
-    """
-
-    usage: Optional[Usage] = None
-    """Usage statistics related to the run.
-
-    This value will be `null` if the run is not in a terminal state (e.g.
-    `in_progress`, `queued`, etc.).
-    """
-
-    temperature: Optional[float] = None
-    """The sampling temperature used for this run. If not set, defaults to 1."""
-
-    top_p: Optional[float] = None
-    """The nucleus sampling value used for this run. If not set, defaults to 1."""
diff --git a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py b/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
deleted file mode 100644
index 77ab84ba..00000000
--- a/src/digitalocean_genai_sdk/types/threads/run_submit_tool_outputs_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
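Only the `status` field tells a caller whether a run has settled, so consumers typically poll until one of the terminal literals (or `requires_action`, which needs caller input) appears. A rough sketch; the `retrieve` call and its signature are assumptions based on the resource layout:

import time

TERMINAL_STATUSES = {"cancelled", "failed", "completed", "incomplete", "expired"}

def wait_for_run(client, thread_id: str, run_id: str, interval: float = 1.0):
    """Poll a run until it settles or asks for tool outputs."""
    while True:
        run = client.threads.runs.retrieve(run_id, thread_id=thread_id)  # hypothetical method
        if run.status in TERMINAL_STATUSES or run.status == "requires_action":
            return run
        time.sleep(interval)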
- -from __future__ import annotations - -from typing import Iterable, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["RunSubmitToolOutputsParams", "ToolOutput"] - - -class RunSubmitToolOutputsParams(TypedDict, total=False): - thread_id: Required[str] - - tool_outputs: Required[Iterable[ToolOutput]] - """A list of tools for which the outputs are being submitted.""" - - stream: Optional[bool] - """ - If `true`, returns a stream of events that happen during the Run as server-sent - events, terminating when the Run enters a terminal state with a `data: [DONE]` - message. - """ - - -class ToolOutput(TypedDict, total=False): - output: str - """The output of the tool call to be submitted to continue the run.""" - - tool_call_id: str - """ - The ID of the tool call in the `required_action` object within the run object - the output is being submitted for. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/run_update_params.py b/src/digitalocean_genai_sdk/types/threads/run_update_params.py deleted file mode 100644 index 7b84a9b5..00000000 --- a/src/digitalocean_genai_sdk/types/threads/run_update_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["RunUpdateParams"] - - -class RunUpdateParams(TypedDict, total=False): - thread_id: Required[str] - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py index 3cab1f9c..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py +++ b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py @@ -1,8 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations - -from .run_step_object import RunStepObject as RunStepObject -from .step_list_params import StepListParams as StepListParams -from .step_list_response import StepListResponse as StepListResponse -from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams diff --git a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py b/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py deleted file mode 100644 index 3ede68fa..00000000 --- a/src/digitalocean_genai_sdk/types/threads/runs/run_step_object.py +++ /dev/null @@ -1,323 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
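Pairing `RunSubmitToolOutputsParams` with the `required_action` field on the run object gives the hand-off sketched below; the `submit_tool_outputs` method name is an assumption, and the output payload is a placeholder:

def answer_tool_calls(client, run, thread_id: str):
    """Submit one output per pending tool call on a `requires_action` run."""
    assert run.status == "requires_action" and run.required_action is not None
    outputs = [
        {"tool_call_id": call.id, "output": '{"result": 42}'}  # placeholder output
        for call in run.required_action.submit_tool_outputs.tool_calls
    ]
    return client.threads.runs.submit_tool_outputs(  # hypothetical method
        run.id, thread_id=thread_id, tool_outputs=outputs
    )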
- -from typing import Dict, List, Union, Optional -from typing_extensions import Literal, TypeAlias - -from ...._models import BaseModel -from ...file_search_ranker import FileSearchRanker - -__all__ = [ - "RunStepObject", - "LastError", - "StepDetails", - "StepDetailsRunStepDetailsMessageCreationObject", - "StepDetailsRunStepDetailsMessageCreationObjectMessageCreation", - "StepDetailsRunStepDetailsToolCallsObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCall", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject", - "StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction", - "Usage", -] - - -class LastError(BaseModel): - code: Literal["server_error", "rate_limit_exceeded"] - """One of `server_error` or `rate_limit_exceeded`.""" - - message: str - """A human-readable description of the error.""" - - -class StepDetailsRunStepDetailsMessageCreationObjectMessageCreation(BaseModel): - message_id: str - """The ID of the message that was created by this run step.""" - - -class StepDetailsRunStepDetailsMessageCreationObject(BaseModel): - message_creation: StepDetailsRunStepDetailsMessageCreationObjectMessageCreation - - type: Literal["message_creation"] - """Always `message_creation`.""" - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject( - BaseModel -): - logs: str - """The text output from the Code Interpreter tool call.""" - - type: Literal["logs"] - """Always `logs`.""" - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage( - BaseModel -): - file_id: str - """The [file](/docs/api-reference/files) ID of the image.""" - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject( - BaseModel -): - image: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObjectImage - - type: Literal["image"] - """Always `image`.""" - - 
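The logs and image classes above (and the `TypeAlias` that follows) form a tagged union discriminated by `type`, so a consumer can dispatch without spelling out the generated class names. A small sketch:

def render_code_interpreter_output(output) -> str:
    """Return a printable form of a Code Interpreter output (`logs` or `image`)."""
    if output.type == "logs":
        return output.logs
    if output.type == "image":
        return f"<image: file {output.image.file_id}>"
    raise ValueError(f"unexpected output type: {output.type!r}")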
-StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput: TypeAlias = Union[
-    StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputLogsObject,
-    StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutputRunStepDetailsToolCallsCodeOutputImageObject,
-]
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter(BaseModel):
-    input: str
-    """The input to the Code Interpreter tool call."""
-
-    outputs: List[
-        StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreterOutput
-    ]
-    """The outputs from the Code Interpreter tool call.
-
-    Code Interpreter can output one or more items, including text (`logs`) or images
-    (`image`). Each of these is represented by a different object type.
-    """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject(BaseModel):
-    id: str
-    """The ID of the tool call."""
-
-    code_interpreter: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObjectCodeInterpreter
-    """The Code Interpreter tool call definition."""
-
-    type: Literal["code_interpreter"]
-    """The type of tool call.
-
-    This is always going to be `code_interpreter` for this type of tool call.
-    """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions(
-    BaseModel
-):
-    ranker: FileSearchRanker
-    """The ranker to use for the file search.
-
-    If not specified, will use the `auto` ranker.
-    """
-
-    score_threshold: float
-    """The score threshold for the file search.
-
-    The value must be a floating point number between 0 and 1.
-    """
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent(
-    BaseModel
-):
-    text: Optional[str] = None
-    """The text content of the file."""
-
-    type: Optional[Literal["text"]] = None
-    """The type of the content."""
-
-
-class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult(
-    BaseModel
-):
-    file_id: str
-    """The ID of the file that the result was found in."""
-
-    file_name: str
-    """The name of the file that the result was found in."""
-
-    score: float
-    """The score of the result.
-
-    The value must be a floating point number between 0 and 1.
-    """
-
-    content: Optional[
-        List[
-            StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResultContent
-        ]
-    ] = None
-    """The content of the result that was found.
-
-    The content is only included if requested via the include query parameter.
- """ - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch(BaseModel): - ranking_options: Optional[ - StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchRankingOptions - ] = None - """The ranking options for the file search.""" - - results: Optional[ - List[StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearchResult] - ] = None - """The results of the file search.""" - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject(BaseModel): - id: str - """The ID of the tool call object.""" - - file_search: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObjectFileSearch - """For now, this is always going to be an empty object.""" - - type: Literal["file_search"] - """The type of tool call. - - This is always going to be `file_search` for this type of tool call. - """ - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction(BaseModel): - arguments: str - """The arguments passed to the function.""" - - name: str - """The name of the function.""" - - output: Optional[str] = None - """The output of the function. - - This will be `null` if the outputs have not been - [submitted](/docs/api-reference/runs/submitToolOutputs) yet. - """ - - -class StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject(BaseModel): - id: str - """The ID of the tool call object.""" - - function: StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObjectFunction - """The definition of the function that was called.""" - - type: Literal["function"] - """The type of tool call. - - This is always going to be `function` for this type of tool call. - """ - - -StepDetailsRunStepDetailsToolCallsObjectToolCall: TypeAlias = Union[ - StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsCodeObject, - StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFileSearchObject, - StepDetailsRunStepDetailsToolCallsObjectToolCallRunStepDetailsToolCallsFunctionObject, -] - - -class StepDetailsRunStepDetailsToolCallsObject(BaseModel): - tool_calls: List[StepDetailsRunStepDetailsToolCallsObjectToolCall] - """An array of tool calls the run step was involved in. - - These can be associated with one of three types of tools: `code_interpreter`, - `file_search`, or `function`. - """ - - type: Literal["tool_calls"] - """Always `tool_calls`.""" - - -StepDetails: TypeAlias = Union[StepDetailsRunStepDetailsMessageCreationObject, StepDetailsRunStepDetailsToolCallsObject] - - -class Usage(BaseModel): - completion_tokens: int - """Number of completion tokens used over the course of the run step.""" - - prompt_tokens: int - """Number of prompt tokens used over the course of the run step.""" - - total_tokens: int - """Total number of tokens used (prompt + completion).""" - - -class RunStepObject(BaseModel): - id: str - """The identifier of the run step, which can be referenced in API endpoints.""" - - assistant_id: str - """ - The ID of the [assistant](/docs/api-reference/assistants) associated with the - run step. 
- """ - - cancelled_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run step was cancelled.""" - - completed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run step completed.""" - - created_at: int - """The Unix timestamp (in seconds) for when the run step was created.""" - - expired_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run step expired. - - A step is considered expired if the parent run is expired. - """ - - failed_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the run step failed.""" - - last_error: Optional[LastError] = None - """The last error associated with this run step. - - Will be `null` if there are no errors. - """ - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - object: Literal["thread.run.step"] - """The object type, which is always `thread.run.step`.""" - - run_id: str - """The ID of the [run](/docs/api-reference/runs) that this run step is a part of.""" - - status: Literal["in_progress", "cancelled", "failed", "completed", "expired"] - """ - The status of the run step, which can be either `in_progress`, `cancelled`, - `failed`, `completed`, or `expired`. - """ - - step_details: StepDetails - """The details of the run step.""" - - thread_id: str - """The ID of the [thread](/docs/api-reference/threads) that was run.""" - - type: Literal["message_creation", "tool_calls"] - """The type of run step, which can be either `message_creation` or `tool_calls`.""" - - usage: Optional[Usage] = None - """Usage statistics related to the run step. - - This value will be `null` while the run step's status is `in_progress`. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py deleted file mode 100644 index 6383fcb3..00000000 --- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_params.py +++ /dev/null @@ -1,54 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["StepListParams"] - - -class StepListParams(TypedDict, total=False): - thread_id: Required[str] - - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] - """A list of additional fields to include in the response. 
- - Currently the only supported value is - `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py b/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py deleted file mode 100644 index 93ccb4ca..00000000 --- a/src/digitalocean_genai_sdk/types/threads/runs/step_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ...._models import BaseModel -from .run_step_object import RunStepObject - -__all__ = ["StepListResponse"] - - -class StepListResponse(BaseModel): - data: List[RunStepObject] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py b/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py deleted file mode 100644 index ce6bcbfb..00000000 --- a/src/digitalocean_genai_sdk/types/threads/runs/step_retrieve_params.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["StepRetrieveParams"] - - -class StepRetrieveParams(TypedDict, total=False): - thread_id: Required[str] - - run_id: Required[str] - - include: List[Literal["step_details.tool_calls[*].file_search.results[*].content"]] - """A list of additional fields to include in the response. - - Currently the only supported value is - `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - search result content. - - See the - [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) - for more information. - """ diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object.py b/src/digitalocean_genai_sdk/types/threads/truncation_object.py deleted file mode 100644 index 7c81b3b5..00000000 --- a/src/digitalocean_genai_sdk/types/threads/truncation_object.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["TruncationObject"] - - -class TruncationObject(BaseModel): - type: Literal["auto", "last_messages"] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] = None - """ - The number of most recent messages from the thread when constructing the context - for the run. 
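On the request side, a `last_messages` policy is just the small dict described here. A sketch of passing it when creating a run; the `create` method name is an assumption:

def create_truncated_run(client, thread_id: str, assistant_id: str):
    """Create a run whose context keeps only the 10 newest thread messages."""
    return client.threads.runs.create(  # hypothetical method
        thread_id=thread_id,
        assistant_id=assistant_id,
        truncation_strategy={"type": "last_messages", "last_messages": 10},
    )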
- """ diff --git a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py b/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py deleted file mode 100644 index 98d942fa..00000000 --- a/src/digitalocean_genai_sdk/types/threads/truncation_object_param.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["TruncationObjectParam"] - - -class TruncationObjectParam(TypedDict, total=False): - type: Required[Literal["auto", "last_messages"]] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ diff --git a/src/digitalocean_genai_sdk/types/transcription_segment.py b/src/digitalocean_genai_sdk/types/transcription_segment.py deleted file mode 100644 index 2345fa18..00000000 --- a/src/digitalocean_genai_sdk/types/transcription_segment.py +++ /dev/null @@ -1,49 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from .._models import BaseModel - -__all__ = ["TranscriptionSegment"] - - -class TranscriptionSegment(BaseModel): - id: int - """Unique identifier of the segment.""" - - avg_logprob: float - """Average logprob of the segment. - - If the value is lower than -1, consider the logprobs failed. - """ - - compression_ratio: float - """Compression ratio of the segment. - - If the value is greater than 2.4, consider the compression failed. - """ - - end: float - """End time of the segment in seconds.""" - - no_speech_prob: float - """Probability of no speech in the segment. - - If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this - segment silent. - """ - - seek: int - """Seek offset of the segment.""" - - start: float - """Start time of the segment in seconds.""" - - temperature: float - """Temperature parameter used for generating the segment.""" - - text: str - """Text content of the segment.""" - - tokens: List[int] - """Array of token IDs for the text content.""" diff --git a/src/digitalocean_genai_sdk/types/upload.py b/src/digitalocean_genai_sdk/types/upload.py deleted file mode 100644 index 06b8a806..00000000 --- a/src/digitalocean_genai_sdk/types/upload.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .openai_file import OpenAIFile - -__all__ = ["Upload"] - - -class Upload(BaseModel): - id: str - """The Upload unique identifier, which can be referenced in API endpoints.""" - - bytes: int - """The intended number of bytes to be uploaded.""" - - created_at: int - """The Unix timestamp (in seconds) for when the Upload was created.""" - - expires_at: int - """The Unix timestamp (in seconds) for when the Upload will expire.""" - - filename: str - """The name of the file to be uploaded.""" - - purpose: str - """The intended purpose of the file. 
-
-    [Please refer here](/docs/api-reference/files/object#files/object-purpose) for
-    acceptable values.
-    """
-
-    status: Literal["pending", "completed", "cancelled", "expired"]
-    """The status of the Upload."""
-
-    file: Optional[OpenAIFile] = None
-    """The `File` object represents a document that has been uploaded to OpenAI."""
-
-    object: Optional[Literal["upload"]] = None
-    """The object type, which is always "upload"."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_params.py b/src/digitalocean_genai_sdk/types/upload_add_part_params.py
deleted file mode 100644
index a0c8b61c..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .._types import FileTypes
-
-__all__ = ["UploadAddPartParams"]
-
-
-class UploadAddPartParams(TypedDict, total=False):
-    data: Required[FileTypes]
-    """The chunk of bytes for this Part."""
diff --git a/src/digitalocean_genai_sdk/types/upload_add_part_response.py b/src/digitalocean_genai_sdk/types/upload_add_part_response.py
deleted file mode 100644
index fb091f76..00000000
--- a/src/digitalocean_genai_sdk/types/upload_add_part_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["UploadAddPartResponse"]
-
-
-class UploadAddPartResponse(BaseModel):
-    id: str
-    """The upload Part unique identifier, which can be referenced in API endpoints."""
-
-    created_at: int
-    """The Unix timestamp (in seconds) for when the Part was created."""
-
-    object: Literal["upload.part"]
-    """The object type, which is always `upload.part`."""
-
-    upload_id: str
-    """The ID of the Upload object that this Part was added to."""
diff --git a/src/digitalocean_genai_sdk/types/upload_complete_params.py b/src/digitalocean_genai_sdk/types/upload_complete_params.py
deleted file mode 100644
index cce568d5..00000000
--- a/src/digitalocean_genai_sdk/types/upload_complete_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Required, TypedDict
-
-__all__ = ["UploadCompleteParams"]
-
-
-class UploadCompleteParams(TypedDict, total=False):
-    part_ids: Required[List[str]]
-    """The ordered list of Part IDs."""
-
-    md5: str
-    """
-    The optional md5 checksum for the file contents to verify if the bytes uploaded
-    match what you expect.
-    """
diff --git a/src/digitalocean_genai_sdk/types/upload_create_params.py b/src/digitalocean_genai_sdk/types/upload_create_params.py
deleted file mode 100644
index eab9a51b..00000000
--- a/src/digitalocean_genai_sdk/types/upload_create_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["UploadCreateParams"]
-
-
-class UploadCreateParams(TypedDict, total=False):
-    bytes: Required[int]
-    """The number of bytes in the file you are uploading."""
-
-    filename: Required[str]
-    """The name of the file to upload."""
-
-    mime_type: Required[str]
-    """The MIME type of the file.
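Taken together, the upload param types above describe a create, add-parts, complete sequence, with `part_ids` fixing the byte order and `md5` as an optional integrity check. A rough end-to-end sketch; the `uploads.*` method names are assumptions based on the resource layout:

import hashlib

def upload_in_one_part(client, data: bytes, filename: str):
    """Create an Upload, send the bytes as a single Part, then complete it."""
    upload = client.uploads.create(  # hypothetical method
        bytes=len(data), filename=filename, mime_type="text/plain", purpose="assistants"
    )
    part = client.uploads.add_part(upload.id, data=data)  # hypothetical method
    return client.uploads.complete(  # hypothetical method
        upload.id, part_ids=[part.id], md5=hashlib.md5(data).hexdigest()
    )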
-
-    This must fall within the supported MIME types for your file purpose. See the
-    supported MIME types for assistants and vision.
-    """
-
-    purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]]
-    """The intended purpose of the uploaded file.
-
-    See the
-    [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).
-    """
diff --git a/src/digitalocean_genai_sdk/types/usage_response.py b/src/digitalocean_genai_sdk/types/usage_response.py
deleted file mode 100644
index 9f70e7c4..00000000
--- a/src/digitalocean_genai_sdk/types/usage_response.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from .._models import BaseModel
-
-__all__ = [
-    "UsageResponse",
-    "Data",
-    "DataResult",
-    "DataResultUsageCompletionsResult",
-    "DataResultUsageEmbeddingsResult",
-    "DataResultUsageModerationsResult",
-    "DataResultUsageImagesResult",
-    "DataResultUsageAudioSpeechesResult",
-    "DataResultUsageAudioTranscriptionsResult",
-    "DataResultUsageVectorStoresResult",
-    "DataResultUsageCodeInterpreterSessionsResult",
-    "DataResultCostsResult",
-    "DataResultCostsResultAmount",
-]
-
-
-class DataResultUsageCompletionsResult(BaseModel):
-    input_tokens: int
-    """The aggregated number of text input tokens used, including cached tokens.
-
-    For customers subscribed to scale tier, this includes scale tier tokens.
-    """
-
-    num_model_requests: int
-    """The count of requests made to the model."""
-
-    object: Literal["organization.usage.completions.result"]
-
-    output_tokens: int
-    """The aggregated number of text output tokens used.
-
-    For customers subscribed to scale tier, this includes scale tier tokens.
-    """
-
-    api_key_id: Optional[str] = None
-    """
-    When `group_by=api_key_id`, this field provides the API key ID of the grouped
-    usage result.
-    """
-
-    batch: Optional[bool] = None
-    """
-    When `group_by=batch`, this field tells whether the grouped usage result is
-    batch or not.
-    """
-
-    input_audio_tokens: Optional[int] = None
-    """The aggregated number of audio input tokens used, including cached tokens."""
-
-    input_cached_tokens: Optional[int] = None
-    """
-    The aggregated number of text input tokens that have been cached from previous
-    requests. For customers subscribed to scale tier, this includes scale tier
-    tokens.
-    """
-
-    model: Optional[str] = None
-    """
-    When `group_by=model`, this field provides the model name of the grouped usage
-    result.
-    """
-
-    output_audio_tokens: Optional[int] = None
-    """The aggregated number of audio output tokens used."""
-
-    project_id: Optional[str] = None
-    """
-    When `group_by=project_id`, this field provides the project ID of the grouped
-    usage result.
-    """
-
-    user_id: Optional[str] = None
-    """
-    When `group_by=user_id`, this field provides the user ID of the grouped usage
-    result.
-    """
-
-
-class DataResultUsageEmbeddingsResult(BaseModel):
-    input_tokens: int
-    """The aggregated number of input tokens used."""
-
-    num_model_requests: int
-    """The count of requests made to the model."""
-
-    object: Literal["organization.usage.embeddings.result"]
-
-    api_key_id: Optional[str] = None
-    """
-    When `group_by=api_key_id`, this field provides the API key ID of the grouped
-    usage result.
-    """
-
-    model: Optional[str] = None
-    """
-    When `group_by=model`, this field provides the model name of the grouped usage
-    result.
- """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - user_id: Optional[str] = None - """ - When `group_by=user_id`, this field provides the user ID of the grouped usage - result. - """ - - -class DataResultUsageModerationsResult(BaseModel): - input_tokens: int - """The aggregated number of input tokens used.""" - - num_model_requests: int - """The count of requests made to the model.""" - - object: Literal["organization.usage.moderations.result"] - - api_key_id: Optional[str] = None - """ - When `group_by=api_key_id`, this field provides the API key ID of the grouped - usage result. - """ - - model: Optional[str] = None - """ - When `group_by=model`, this field provides the model name of the grouped usage - result. - """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - user_id: Optional[str] = None - """ - When `group_by=user_id`, this field provides the user ID of the grouped usage - result. - """ - - -class DataResultUsageImagesResult(BaseModel): - images: int - """The number of images processed.""" - - num_model_requests: int - """The count of requests made to the model.""" - - object: Literal["organization.usage.images.result"] - - api_key_id: Optional[str] = None - """ - When `group_by=api_key_id`, this field provides the API key ID of the grouped - usage result. - """ - - model: Optional[str] = None - """ - When `group_by=model`, this field provides the model name of the grouped usage - result. - """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - size: Optional[str] = None - """ - When `group_by=size`, this field provides the image size of the grouped usage - result. - """ - - source: Optional[str] = None - """ - When `group_by=source`, this field provides the source of the grouped usage - result, possible values are `image.generation`, `image.edit`, `image.variation`. - """ - - user_id: Optional[str] = None - """ - When `group_by=user_id`, this field provides the user ID of the grouped usage - result. - """ - - -class DataResultUsageAudioSpeechesResult(BaseModel): - characters: int - """The number of characters processed.""" - - num_model_requests: int - """The count of requests made to the model.""" - - object: Literal["organization.usage.audio_speeches.result"] - - api_key_id: Optional[str] = None - """ - When `group_by=api_key_id`, this field provides the API key ID of the grouped - usage result. - """ - - model: Optional[str] = None - """ - When `group_by=model`, this field provides the model name of the grouped usage - result. - """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - user_id: Optional[str] = None - """ - When `group_by=user_id`, this field provides the user ID of the grouped usage - result. - """ - - -class DataResultUsageAudioTranscriptionsResult(BaseModel): - num_model_requests: int - """The count of requests made to the model.""" - - object: Literal["organization.usage.audio_transcriptions.result"] - - seconds: int - """The number of seconds processed.""" - - api_key_id: Optional[str] = None - """ - When `group_by=api_key_id`, this field provides the API key ID of the grouped - usage result. 
- """ - - model: Optional[str] = None - """ - When `group_by=model`, this field provides the model name of the grouped usage - result. - """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - user_id: Optional[str] = None - """ - When `group_by=user_id`, this field provides the user ID of the grouped usage - result. - """ - - -class DataResultUsageVectorStoresResult(BaseModel): - object: Literal["organization.usage.vector_stores.result"] - - usage_bytes: int - """The vector stores usage in bytes.""" - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - -class DataResultUsageCodeInterpreterSessionsResult(BaseModel): - object: Literal["organization.usage.code_interpreter_sessions.result"] - - num_sessions: Optional[int] = None - """The number of code interpreter sessions.""" - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - usage result. - """ - - -class DataResultCostsResultAmount(BaseModel): - currency: Optional[str] = None - """Lowercase ISO-4217 currency e.g. "usd" """ - - value: Optional[float] = None - """The numeric value of the cost.""" - - -class DataResultCostsResult(BaseModel): - object: Literal["organization.costs.result"] - - amount: Optional[DataResultCostsResultAmount] = None - """The monetary value in its associated currency.""" - - line_item: Optional[str] = None - """ - When `group_by=line_item`, this field provides the line item of the grouped - costs result. - """ - - project_id: Optional[str] = None - """ - When `group_by=project_id`, this field provides the project ID of the grouped - costs result. - """ - - -DataResult: TypeAlias = Union[ - DataResultUsageCompletionsResult, - DataResultUsageEmbeddingsResult, - DataResultUsageModerationsResult, - DataResultUsageImagesResult, - DataResultUsageAudioSpeechesResult, - DataResultUsageAudioTranscriptionsResult, - DataResultUsageVectorStoresResult, - DataResultUsageCodeInterpreterSessionsResult, - DataResultCostsResult, -] - - -class Data(BaseModel): - end_time: int - - object: Literal["bucket"] - - result: List[DataResult] - - start_time: int - - -class UsageResponse(BaseModel): - data: List[Data] - - has_more: bool - - next_page: str - - object: Literal["page"] diff --git a/src/digitalocean_genai_sdk/types/vector_store_create_params.py b/src/digitalocean_genai_sdk/types/vector_store_create_params.py deleted file mode 100644 index 48118e80..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_create_params.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Optional -from typing_extensions import TypeAlias, TypedDict - -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam -from .auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam -from .static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam - -__all__ = ["VectorStoreCreateParams", "ChunkingStrategy"] - - -class VectorStoreCreateParams(TypedDict, total=False): - chunking_strategy: ChunkingStrategy - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. Only applicable if `file_ids` is - non-empty. 
- """ - - expires_after: VectorStoreExpirationAfterParam - """The expiration policy for a vector store.""" - - file_ids: List[str] - """A list of [File](/docs/api-reference/files) IDs that the vector store should - use. - - Useful for tools like `file_search` that can access files. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - name: str - """The name of the vector store.""" - - -ChunkingStrategy: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam] diff --git a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py b/src/digitalocean_genai_sdk/types/vector_store_delete_response.py deleted file mode 100644 index 17d3ee21..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["VectorStoreDeleteResponse"] - - -class VectorStoreDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["vector_store.deleted"] diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py deleted file mode 100644 index 1d417d52..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["VectorStoreExpirationAfter"] - - -class VectorStoreExpirationAfter(BaseModel): - anchor: Literal["last_active_at"] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. - """ - - days: int - """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py b/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py deleted file mode 100644 index 29a008c7..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_expiration_after_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["VectorStoreExpirationAfterParam"] - - -class VectorStoreExpirationAfterParam(TypedDict, total=False): - anchor: Required[Literal["last_active_at"]] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. - """ - - days: Required[int] - """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_params.py b/src/digitalocean_genai_sdk/types/vector_store_list_params.py deleted file mode 100644 index e26ff90a..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_list_params.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["VectorStoreListParams"] - - -class VectorStoreListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/vector_store_list_response.py b/src/digitalocean_genai_sdk/types/vector_store_list_response.py deleted file mode 100644 index 2dc455ea..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_list_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from .._models import BaseModel -from .vector_store_object import VectorStoreObject - -__all__ = ["VectorStoreListResponse"] - - -class VectorStoreListResponse(BaseModel): - data: List[VectorStoreObject] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/vector_store_object.py b/src/digitalocean_genai_sdk/types/vector_store_object.py deleted file mode 100644 index ebd52a31..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_object.py +++ /dev/null @@ -1,71 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .vector_store_expiration_after import VectorStoreExpirationAfter - -__all__ = ["VectorStoreObject", "FileCounts"] - - -class FileCounts(BaseModel): - cancelled: int - """The number of files that were cancelled.""" - - completed: int - """The number of files that have been successfully processed.""" - - failed: int - """The number of files that have failed to process.""" - - in_progress: int - """The number of files that are currently being processed.""" - - total: int - """The total number of files.""" - - -class VectorStoreObject(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - created_at: int - """The Unix timestamp (in seconds) for when the vector store was created.""" - - file_counts: FileCounts - - last_active_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the vector store was last active.""" - - metadata: Optional[Dict[str, str]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
- """ - - name: str - """The name of the vector store.""" - - object: Literal["vector_store"] - """The object type, which is always `vector_store`.""" - - status: Literal["expired", "in_progress", "completed"] - """ - The status of the vector store, which can be either `expired`, `in_progress`, or - `completed`. A status of `completed` indicates that the vector store is ready - for use. - """ - - usage_bytes: int - """The total number of bytes used by the files in the vector store.""" - - expires_after: Optional[VectorStoreExpirationAfter] = None - """The expiration policy for a vector store.""" - - expires_at: Optional[int] = None - """The Unix timestamp (in seconds) for when the vector store will expire.""" diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_params.py b/src/digitalocean_genai_sdk/types/vector_store_search_params.py deleted file mode 100644 index 5b90b063..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_search_params.py +++ /dev/null @@ -1,40 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .compound_filter_param import CompoundFilterParam -from .comparison_filter_param import ComparisonFilterParam - -__all__ = ["VectorStoreSearchParams", "Filters", "RankingOptions"] - - -class VectorStoreSearchParams(TypedDict, total=False): - query: Required[Union[str, List[str]]] - """A query string for a search""" - - filters: Filters - """A filter to apply based on file attributes.""" - - max_num_results: int - """The maximum number of results to return. - - This number should be between 1 and 50 inclusive. - """ - - ranking_options: RankingOptions - """Ranking options for search.""" - - rewrite_query: bool - """Whether to rewrite the natural language query for vector search.""" - - -Filters: TypeAlias = Union[ComparisonFilterParam, CompoundFilterParam] - - -class RankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default-2024-11-15"] - - score_threshold: float diff --git a/src/digitalocean_genai_sdk/types/vector_store_search_response.py b/src/digitalocean_genai_sdk/types/vector_store_search_response.py deleted file mode 100644 index b303f7ea..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_search_response.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, List, Union, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["VectorStoreSearchResponse", "Data", "DataContent"] - - -class DataContent(BaseModel): - text: str - """The text content returned from search.""" - - type: Literal["text"] - """The type of content.""" - - -class Data(BaseModel): - attributes: Optional[Dict[str, Union[str, float, bool]]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters, booleans, or numbers. 
- """ - - content: List[DataContent] - """Content chunks from the file.""" - - file_id: str - """The ID of the vector store file.""" - - filename: str - """The name of the vector store file.""" - - score: float - """The similarity score for the result.""" - - -class VectorStoreSearchResponse(BaseModel): - data: List[Data] - """The list of search result items.""" - - has_more: bool - """Indicates if there are more results to fetch.""" - - next_page: Optional[str] = None - """The token for the next page, if any.""" - - object: Literal["vector_store.search_results.page"] - """The object type, which is always `vector_store.search_results.page`""" - - search_query: List[str] diff --git a/src/digitalocean_genai_sdk/types/vector_store_update_params.py b/src/digitalocean_genai_sdk/types/vector_store_update_params.py deleted file mode 100644 index a9400cf2..00000000 --- a/src/digitalocean_genai_sdk/types/vector_store_update_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Optional -from typing_extensions import TypedDict - -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam - -__all__ = ["VectorStoreUpdateParams"] - - -class VectorStoreUpdateParams(TypedDict, total=False): - expires_after: Optional[VectorStoreExpirationAfterParam] - """The expiration policy for a vector store.""" - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - name: Optional[str] - """The name of the vector store.""" diff --git a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py index 5018f06d..f8ee8b14 100644 --- a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py +++ b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py @@ -1,15 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations - -from .file_list_params import FileListParams as FileListParams -from .file_create_params import FileCreateParams as FileCreateParams -from .file_update_params import FileUpdateParams as FileUpdateParams -from .file_delete_response import FileDeleteResponse as FileDeleteResponse -from .file_batch_create_params import FileBatchCreateParams as FileBatchCreateParams -from .vector_store_file_object import VectorStoreFileObject as VectorStoreFileObject -from .file_batch_list_files_params import FileBatchListFilesParams as FileBatchListFilesParams -from .file_retrieve_content_response import FileRetrieveContentResponse as FileRetrieveContentResponse -from .vector_store_file_batch_object import VectorStoreFileBatchObject as VectorStoreFileBatchObject -from .chunking_strategy_request_param import ChunkingStrategyRequestParam as ChunkingStrategyRequestParam -from .list_vector_store_files_response import ListVectorStoreFilesResponse as ListVectorStoreFilesResponse diff --git a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py b/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py deleted file mode 100644 index 1dab9558..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/chunking_strategy_request_param.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import TypeAlias - -from ..auto_chunking_strategy_request_param import AutoChunkingStrategyRequestParam -from ..static_chunking_strategy_request_param import StaticChunkingStrategyRequestParam - -__all__ = ["ChunkingStrategyRequestParam"] - -ChunkingStrategyRequestParam: TypeAlias = Union[AutoChunkingStrategyRequestParam, StaticChunkingStrategyRequestParam] diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py deleted file mode 100644 index 2e2bf227..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_create_params.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Optional -from typing_extensions import Required, TypedDict - -from .chunking_strategy_request_param import ChunkingStrategyRequestParam - -__all__ = ["FileBatchCreateParams"] - - -class FileBatchCreateParams(TypedDict, total=False): - file_ids: Required[List[str]] - """A list of [File](/docs/api-reference/files) IDs that the vector store should - use. - - Useful for tools like `file_search` that can access files. - """ - - attributes: Optional[Dict[str, Union[str, float, bool]]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters, booleans, or numbers. - """ - - chunking_strategy: ChunkingStrategyRequestParam - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. 
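# Illustrative sketch: the removed ChunkingStrategyRequestParam union accepts
# either an auto or a static strategy. The static sub-fields below follow the
# StaticChunkingStrategyRequestParam type referenced above; that file is not
# shown in this hunk, so treat the exact key names and values as assumptions.
auto_strategy = {"type": "auto"}  # server chooses the chunk sizing

static_strategy = {
    "type": "static",
    "static": {
        "max_chunk_size_tokens": 800,  # assumed field name
        "chunk_overlap_tokens": 400,   # assumed field name
    },
}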
- """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py deleted file mode 100644 index 2a0a6c6a..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_batch_list_files_params.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["FileBatchListFilesParams"] - - -class FileBatchListFilesParams(TypedDict, total=False): - vector_store_id: Required[str] - - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - filter: Literal["in_progress", "completed", "failed", "cancelled"] - """Filter by file status. - - One of `in_progress`, `completed`, `failed`, `cancelled`. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py deleted file mode 100644 index 6183f4e7..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_create_params.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Optional -from typing_extensions import Required, TypedDict - -from .chunking_strategy_request_param import ChunkingStrategyRequestParam - -__all__ = ["FileCreateParams"] - - -class FileCreateParams(TypedDict, total=False): - file_id: Required[str] - """A [File](/docs/api-reference/files) ID that the vector store should use. - - Useful for tools like `file_search` that can access files. - """ - - attributes: Optional[Dict[str, Union[str, float, bool]]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters, booleans, or numbers. - """ - - chunking_strategy: ChunkingStrategyRequestParam - """The chunking strategy used to chunk the file(s). - - If not set, will use the `auto` strategy. 
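# Illustrative sketch combining the removed FileBatchCreateParams and
# FileBatchListFilesParams: create a batch, then walk its files with the
# cursor pagination the docstrings describe. The method names under
# `client.vector_stores.file_batches` and all IDs are assumptions; the
# parameter names, ranges, and cursor semantics come from the deleted files.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

batch = client.vector_stores.file_batches.create(
    vector_store_id="vs_example",
    file_ids=["file-abc123"],            # File IDs the vector store should use
    chunking_strategy={"type": "auto"},  # the default when omitted
)

params = {}
while True:
    page = client.vector_stores.file_batches.list_files(
        batch.id,
        vector_store_id="vs_example",
        filter="completed",  # in_progress | completed | failed | cancelled
        limit=100,           # 1-100, default 20
        order="asc",         # sort by created_at
        **params,
    )
    for f in page.data:
        print(f.id)
    if not page.has_more:
        break
    params = {"after": page.last_id}  # cursor: last object ID on this page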
- """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py deleted file mode 100644 index 24fbe570..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_delete_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["FileDeleteResponse"] - - -class FileDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["vector_store.file.deleted"] diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py deleted file mode 100644 index 867b5fb3..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_list_params.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["FileListParams"] - - -class FileListParams(TypedDict, total=False): - after: str - """A cursor for use in pagination. - - `after` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your - subsequent call can include after=obj_foo in order to fetch the next page of the - list. - """ - - before: str - """A cursor for use in pagination. - - `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, starting with obj_foo, your - subsequent call can include before=obj_foo in order to fetch the previous page - of the list. - """ - - filter: Literal["in_progress", "completed", "failed", "cancelled"] - """Filter by file status. - - One of `in_progress`, `completed`, `failed`, `cancelled`. - """ - - limit: int - """A limit on the number of objects to be returned. - - Limit can range between 1 and 100, and the default is 20. - """ - - order: Literal["asc", "desc"] - """Sort order by the `created_at` timestamp of the objects. - - `asc` for ascending order and `desc` for descending order. - """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py b/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py deleted file mode 100644 index e4f0966c..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_retrieve_content_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
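# The "attributes" docstrings in these deleted types all repeat the same
# constraints: at most 16 key-value pairs, keys up to 64 characters, string
# values up to 512 characters, with booleans and numbers also allowed. A small
# client-side check, written here as an illustrative helper rather than as
# anything the SDK itself provides:
from typing import Dict, Union


def validate_attributes(attrs: Dict[str, Union[str, float, bool]]) -> None:
    """Reject attribute maps that would violate the documented limits."""
    if len(attrs) > 16:
        raise ValueError("at most 16 key-value pairs are allowed")
    for key, value in attrs.items():
        if len(key) > 64:
            raise ValueError(f"key {key!r} exceeds 64 characters")
        if isinstance(value, str) and len(value) > 512:
            raise ValueError(f"value for {key!r} exceeds 512 characters")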
- -from typing import List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["FileRetrieveContentResponse", "Data"] - - -class Data(BaseModel): - text: Optional[str] = None - """The text content""" - - type: Optional[str] = None - """The content type (currently only `"text"`)""" - - -class FileRetrieveContentResponse(BaseModel): - data: List[Data] - """Parsed content of the file.""" - - has_more: bool - """Indicates if there are more content pages to fetch.""" - - next_page: Optional[str] = None - """The token for the next page, if any.""" - - object: Literal["vector_store.file_content.page"] - """The object type, which is always `vector_store.file_content.page`""" diff --git a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py b/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py deleted file mode 100644 index ebf540d0..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/file_update_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Union, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["FileUpdateParams"] - - -class FileUpdateParams(TypedDict, total=False): - vector_store_id: Required[str] - - attributes: Required[Optional[Dict[str, Union[str, float, bool]]]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters, booleans, or numbers. - """ diff --git a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py b/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py deleted file mode 100644 index dc997962..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/list_vector_store_files_response.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel -from .vector_store_file_object import VectorStoreFileObject - -__all__ = ["ListVectorStoreFilesResponse"] - - -class ListVectorStoreFilesResponse(BaseModel): - data: List[VectorStoreFileObject] - - first_id: str - - has_more: bool - - last_id: str - - object: str diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py deleted file mode 100644 index 3d5aa1bd..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_batch_object.py +++ /dev/null @@ -1,52 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
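# Illustrative sketch of consuming the removed FileRetrieveContentResponse:
# one page of parsed text chunks plus `has_more`/`next_page` markers. The
# `retrieve_content` method name and the IDs are assumptions, and how
# `next_page` feeds into a follow-up request is not shown in this hunk, so
# the example stops at a single page.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

content = client.vector_stores.files.retrieve_content(
    "file-abc123",
    vector_store_id="vs_example",
)
text = "".join(chunk.text or "" for chunk in content.data)  # fields are Optional
if content.has_more:
    print("more pages at token:", content.next_page)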
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["VectorStoreFileBatchObject", "FileCounts"]
-
-
-class FileCounts(BaseModel):
-    cancelled: int
-    """The number of files that were cancelled."""
-
-    completed: int
-    """The number of files that have been processed."""
-
-    failed: int
-    """The number of files that have failed to process."""
-
-    in_progress: int
-    """The number of files that are currently being processed."""
-
-    total: int
-    """The total number of files."""
-
-
-class VectorStoreFileBatchObject(BaseModel):
-    id: str
-    """The identifier, which can be referenced in API endpoints."""
-
-    created_at: int
-    """
-    The Unix timestamp (in seconds) for when the vector store files batch was
-    created.
-    """
-
-    file_counts: FileCounts
-
-    object: Literal["vector_store.files_batch"]
-    """The object type, which is always `vector_store.files_batch`."""
-
-    status: Literal["in_progress", "completed", "cancelled", "failed"]
-    """
-    The status of the vector store files batch, which can be either `in_progress`,
-    `completed`, `cancelled`, or `failed`.
-    """
-
-    vector_store_id: str
-    """
-    The ID of the [vector store](/docs/api-reference/vector-stores/object) that the
-    [File](/docs/api-reference/files) is attached to.
-    """
diff --git a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py b/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
deleted file mode 100644
index e28e28a6..00000000
--- a/src/digitalocean_genai_sdk/types/vector_stores/vector_store_file_object.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal, TypeAlias
-
-from ..._models import BaseModel
-from ..static_chunking_strategy import StaticChunkingStrategy
-
-__all__ = [
-    "VectorStoreFileObject",
-    "LastError",
-    "ChunkingStrategy",
-    "ChunkingStrategyStaticChunkingStrategyResponseParam",
-    "ChunkingStrategyOtherChunkingStrategyResponseParam",
-]
-
-
-class LastError(BaseModel):
-    code: Literal["server_error", "unsupported_file", "invalid_file"]
-    """One of `server_error`, `unsupported_file`, or `invalid_file`."""
-
-    message: str
-    """A human-readable description of the error."""
-
-
-class ChunkingStrategyStaticChunkingStrategyResponseParam(BaseModel):
-    static: StaticChunkingStrategy
-
-    type: Literal["static"]
-    """Always `static`."""
-
-
-class ChunkingStrategyOtherChunkingStrategyResponseParam(BaseModel):
-    type: Literal["other"]
-    """Always `other`."""
-
-
-ChunkingStrategy: TypeAlias = Union[
-    ChunkingStrategyStaticChunkingStrategyResponseParam, ChunkingStrategyOtherChunkingStrategyResponseParam
-]
-
-
-class VectorStoreFileObject(BaseModel):
-    id: str
-    """The identifier, which can be referenced in API endpoints."""
-
-    created_at: int
-    """The Unix timestamp (in seconds) for when the vector store file was created."""
-
-    last_error: Optional[LastError] = None
-    """The last error associated with this vector store file.
-
-    Will be `null` if there are no errors.
-    """
-
-    object: Literal["vector_store.file"]
-    """The object type, which is always `vector_store.file`."""
-
-    status: Literal["in_progress", "completed", "cancelled", "failed"]
-    """
-    The status of the vector store file, which can be either `in_progress`,
-    `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
-    vector store file is ready for use.
- """ - - usage_bytes: int - """The total vector store usage in bytes. - - Note that this may be different from the original file size. - """ - - vector_store_id: str - """ - The ID of the [vector store](/docs/api-reference/vector-stores/object) that the - [File](/docs/api-reference/files) is attached to. - """ - - attributes: Optional[Dict[str, Union[str, float, bool]]] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. Keys are - strings with a maximum length of 64 characters. Values are strings with a - maximum length of 512 characters, booleans, or numbers. - """ - - chunking_strategy: Optional[ChunkingStrategy] = None - """The strategy used to chunk the file.""" diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared.py b/src/digitalocean_genai_sdk/types/voice_ids_shared.py deleted file mode 100644 index 5679bda3..00000000 --- a/src/digitalocean_genai_sdk/types/voice_ids_shared.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union -from typing_extensions import Literal, TypeAlias - -__all__ = ["VoiceIDsShared"] - -VoiceIDsShared: TypeAlias = Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] -] diff --git a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py b/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py deleted file mode 100644 index ccbd853d..00000000 --- a/src/digitalocean_genai_sdk/types/voice_ids_shared_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, TypeAlias - -__all__ = ["VoiceIDsSharedParam"] - -VoiceIDsSharedParam: TypeAlias = Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] -] diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call.py b/src/digitalocean_genai_sdk/types/web_search_tool_call.py deleted file mode 100644 index 1b57ab87..00000000 --- a/src/digitalocean_genai_sdk/types/web_search_tool_call.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["WebSearchToolCall"] - - -class WebSearchToolCall(BaseModel): - id: str - """The unique ID of the web search tool call.""" - - status: Literal["in_progress", "searching", "completed", "failed"] - """The status of the web search tool call.""" - - type: Literal["web_search_call"] - """The type of the web search tool call. Always `web_search_call`.""" diff --git a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py b/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py deleted file mode 100644 index 39e5c502..00000000 --- a/src/digitalocean_genai_sdk/types/web_search_tool_call_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
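# Illustrative sketch around the removed VectorStoreFileObject: `status` is a
# small lifecycle enum and `last_error` is null unless ingestion failed. The
# `retrieve` method name, the IDs, and the 1-second interval are assumptions;
# the status values and the error shape come from the type above.
import time

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

while True:
    vs_file = client.vector_stores.files.retrieve(
        "file-abc123",
        vector_store_id="vs_example",
    )
    if vs_file.status in ("completed", "failed", "cancelled"):
        break
    time.sleep(1)  # still "in_progress"

if vs_file.last_error is not None:
    print(vs_file.last_error.code, vs_file.last_error.message)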
- -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["WebSearchToolCallParam"] - - -class WebSearchToolCallParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the web search tool call.""" - - status: Required[Literal["in_progress", "searching", "completed", "failed"]] - """The status of the web search tool call.""" - - type: Required[Literal["web_search_call"]] - """The type of the web search tool call. Always `web_search_call`.""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index b065b83d..9c7ff505 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -9,12 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.chat import ( - CreateResponse, - CompletionListResponse, - CompletionDeleteResponse, - CompletionListMessagesResponse, -) +from digitalocean_genai_sdk.types.chat import CreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -29,10 +24,10 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", + model="llama3-8b-instruct", ) assert_matches_type(CreateResponse, completion, path=["response"]) @@ -43,73 +38,25 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No messages=[ { "content": "string", - "role": "developer", - "name": "name", + "role": "system", } ], - model="gpt-4o", - audio={ - "format": "wav", - "voice": "ash", - }, + model="llama3-8b-instruct", frequency_penalty=-2, - function_call="none", - functions=[ - { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - } - ], logit_bias={"foo": 0}, logprobs=True, - max_completion_tokens=0, + max_completion_tokens=256, max_tokens=0, metadata={"foo": "string"}, - modalities=["text"], n=1, - parallel_tool_calls=True, - prediction={ - "content": "string", - "type": "content", - }, presence_penalty=-2, - reasoning_effort="low", - response_format={"type": "text"}, - seed=0, - service_tier="auto", stop="\n", - store=True, stream=True, stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", - web_search_options={ - "search_context_size": "low", - "user_location": { - "approximate": { - "city": "city", - "country": "country", - "region": "region", - "timezone": "timezone", - }, - "type": "approximate", - }, - }, ) assert_matches_type(CreateResponse, completion, path=["response"]) @@ -120,10 +67,10 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", + model="llama3-8b-instruct", ) assert response.is_closed is True @@ -138,89 +85,10 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - 
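# Illustrative sketch mirroring the updated tests above: the trimmed-down
# create call keeps a "system" message and targets "llama3-8b-instruct", with
# the message and model values copied from the test bodies. Environment-based
# credentials are an assumption here; the suite itself points the client at a
# local mock via TEST_API_BASE_URL.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

completion = client.chat.completions.create(
    messages=[{"content": "string", "role": "system"}],
    model="llama3-8b-instruct",
)
print(completion)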
assert_matches_type(CreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.retrieve( - "completion_id", - ) - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.retrieve( - "completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.retrieve( - "completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - client.chat.completions.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.update( - completion_id="completion_id", - metadata={"foo": "string"}, - ) - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.update( - completion_id="completion_id", - metadata={"foo": "string"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.update( - completion_id="completion_id", - metadata={"foo": "string"}, + model="llama3-8b-instruct", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -230,150 +98,6 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - client.chat.completions.with_raw_response.update( - completion_id="", - metadata={"foo": "string"}, - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.list() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - completion = 
client.chat.completions.list( - after="after", - limit=0, - metadata={"foo": "string"}, - model="model", - order="asc", - ) - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.delete( - "completion_id", - ) - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.delete( - "completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.delete( - "completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - client.chat.completions.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_messages(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.list_messages( - completion_id="completion_id", - ) - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_messages_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.list_messages( - completion_id="completion_id", - after="after", - limit=0, - order="asc", - ) - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.list_messages( - completion_id="completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - 
assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_messages(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.list_messages( - completion_id="completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list_messages(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - client.chat.completions.with_raw_response.list_messages( - completion_id="", - ) - class TestAsyncCompletions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -385,10 +109,10 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", + model="llama3-8b-instruct", ) assert_matches_type(CreateResponse, completion, path=["response"]) @@ -399,73 +123,25 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce messages=[ { "content": "string", - "role": "developer", - "name": "name", + "role": "system", } ], - model="gpt-4o", - audio={ - "format": "wav", - "voice": "ash", - }, + model="llama3-8b-instruct", frequency_penalty=-2, - function_call="none", - functions=[ - { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - } - ], logit_bias={"foo": 0}, logprobs=True, - max_completion_tokens=0, + max_completion_tokens=256, max_tokens=0, metadata={"foo": "string"}, - modalities=["text"], n=1, - parallel_tool_calls=True, - prediction={ - "content": "string", - "type": "content", - }, presence_penalty=-2, - reasoning_effort="low", - response_format={"type": "text"}, - seed=0, - service_tier="auto", stop="\n", - store=True, stream=True, stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - "strict": True, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", - web_search_options={ - "search_context_size": "low", - "user_location": { - "approximate": { - "city": "city", - "country": "country", - "region": "region", - "timezone": "timezone", - }, - "type": "approximate", - }, - }, ) assert_matches_type(CreateResponse, completion, path=["response"]) @@ -476,10 +152,10 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", + model="llama3-8b-instruct", ) assert response.is_closed is True @@ -494,89 +170,10 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe messages=[ { "content": "string", - "role": "developer", + "role": "system", } ], - model="gpt-4o", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is 
True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.retrieve( - "completion_id", - ) - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.retrieve( - "completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.retrieve( - "completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - await async_client.chat.completions.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.update( - completion_id="completion_id", - metadata={"foo": "string"}, - ) - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.update( - completion_id="completion_id", - metadata={"foo": "string"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.update( - completion_id="completion_id", - metadata={"foo": "string"}, + model="llama3-8b-instruct", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -585,147 +182,3 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe assert_matches_type(CreateResponse, completion, path=["response"]) assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - await async_client.chat.completions.with_raw_response.update( - completion_id="", - metadata={"foo": "string"}, - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await 
async_client.chat.completions.list() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.list( - after="after", - limit=0, - metadata={"foo": "string"}, - model="model", - order="asc", - ) - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CompletionListResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.delete( - "completion_id", - ) - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.delete( - "completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.delete( - "completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CompletionDeleteResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - await async_client.chat.completions.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.list_messages( - completion_id="completion_id", - ) - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_messages_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.list_messages( - 
completion_id="completion_id", - after="after", - limit=0, - order="asc", - ) - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.list_messages( - completion_id="completion_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.list_messages( - completion_id="completion_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CompletionListMessagesResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_messages(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"): - await async_client.chat.completions.with_raw_response.list_messages( - completion_id="", - ) diff --git a/tests/api_resources/fine_tuning/__init__.py b/tests/api_resources/fine_tuning/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/fine_tuning/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/checkpoints/__init__.py b/tests/api_resources/fine_tuning/checkpoints/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/fine_tuning/checkpoints/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py deleted file mode 100644 index 1983d90a..00000000 --- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py +++ /dev/null @@ -1,309 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
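# Illustrative sketch of the three access patterns these tests exercise, kept
# and removed alike: plain calls returning parsed models, `.with_raw_response`
# for HTTP-level access plus `.parse()`, and `.with_streaming_response` as a
# context manager that closes the connection on exit. Message and model
# values are copied from the kept chat tests; credentials are assumed to come
# from the environment.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

msgs = [{"content": "string", "role": "system"}]

completion = client.chat.completions.create(messages=msgs, model="llama3-8b-instruct")

raw = client.chat.completions.with_raw_response.create(messages=msgs, model="llama3-8b-instruct")
print(raw.http_request.headers.get("X-Stainless-Lang"))  # the header the tests check
completion = raw.parse()  # typed model, with the raw HTTP details still available

with client.chat.completions.with_streaming_response.create(
    messages=msgs, model="llama3-8b-instruct"
) as response:
    completion = response.parse()  # response is closed on exiting the block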
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.fine_tuning.checkpoints import ( - PermissionDeleteResponse, - ListFineTuningCheckpointPermission, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestPermissions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - permission = client.fine_tuning.checkpoints.permissions.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.fine_tuning.checkpoints.permissions.with_raw_response.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.fine_tuning.checkpoints.permissions.with_streaming_response.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - client.fine_tuning.checkpoints.permissions.with_raw_response.create( - permission_id="", - project_ids=["string"], - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - permission = client.fine_tuning.checkpoints.permissions.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - permission = client.fine_tuning.checkpoints.permissions.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - order="ascending", - project_id="project_id", - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = response.parse() - 
assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( - permission_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - permission = client.fine_tuning.checkpoints.permissions.delete( - "cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete( - "cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = response.parse() - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete( - "cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = response.parse() - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - client.fine_tuning.checkpoints.permissions.with_raw_response.delete( - "", - ) - - -class TestAsyncPermissions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - permission = await async_client.fine_tuning.checkpoints.permissions.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = await response.parse() - 
assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.create( - permission_id="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd", - project_ids=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = await response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - await async_client.fine_tuning.checkpoints.permissions.with_raw_response.create( - permission_id="", - project_ids=["string"], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - permission = await async_client.fine_tuning.checkpoints.permissions.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - permission = await async_client.fine_tuning.checkpoints.permissions.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - order="ascending", - project_id="project_id", - ) - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = await response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve( - permission_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = await response.parse() - assert_matches_type(ListFineTuningCheckpointPermission, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve( - permission_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - permission = await async_client.fine_tuning.checkpoints.permissions.delete( - 
"cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete( - "cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - permission = await response.parse() - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete( - "cp_zc4Q7MP6XxulcVzj4MZdwsAB", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - permission = await response.parse() - assert_matches_type(PermissionDeleteResponse, permission, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"): - await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/fine_tuning/jobs/__init__.py b/tests/api_resources/fine_tuning/jobs/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/fine_tuning/jobs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py deleted file mode 100644 index f94416f9..00000000 --- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py +++ /dev/null @@ -1,126 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.fine_tuning.jobs import CheckpointRetrieveResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestCheckpoints: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - checkpoint = client.fine_tuning.jobs.checkpoints.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - checkpoint = client.fine_tuning.jobs.checkpoints.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - ) - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - checkpoint = response.parse() - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - checkpoint = response.parse() - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): - client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( - fine_tuning_job_id="", - ) - - -class TestAsyncCheckpoints: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - checkpoint = await async_client.fine_tuning.jobs.checkpoints.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - ) - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await 
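# A pattern repeated throughout these removed tests: the generated client
# validates path parameters locally and raises ValueError for an empty string,
# before any HTTP request is sent, which is why the assertion works against a
# mock server. A schematic restatement of what the checkpoint tests assert:
import pytest

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
    client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve(
        fine_tuning_job_id="",
    )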
async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - checkpoint = await response.parse() - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.fine_tuning.jobs.checkpoints.with_streaming_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - checkpoint = await response.parse() - assert_matches_type(CheckpointRetrieveResponse, checkpoint, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): - await async_client.fine_tuning.jobs.checkpoints.with_raw_response.retrieve( - fine_tuning_job_id="", - ) diff --git a/tests/api_resources/fine_tuning/jobs/test_events.py b/tests/api_resources/fine_tuning/jobs/test_events.py deleted file mode 100644 index 39802767..00000000 --- a/tests/api_resources/fine_tuning/jobs/test_events.py +++ /dev/null @@ -1,126 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.fine_tuning.jobs import EventRetrieveResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEvents: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - event = client.fine_tuning.jobs.events.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - event = client.fine_tuning.jobs.events.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - ) - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.fine_tuning.jobs.events.with_raw_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - event = response.parse() - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.fine_tuning.jobs.events.with_streaming_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - event = response.parse() - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): - client.fine_tuning.jobs.events.with_raw_response.retrieve( - fine_tuning_job_id="", - ) - - -class TestAsyncEvents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - event = await async_client.fine_tuning.jobs.events.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - event = await async_client.fine_tuning.jobs.events.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - after="after", - limit=0, - ) - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.fine_tuning.jobs.events.with_raw_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - event = await response.parse() - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.fine_tuning.jobs.events.with_streaming_response.retrieve( - fine_tuning_job_id="ft-AF1WoRqd3aJAHsqc9NY7iL8F", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - event = await response.parse() - assert_matches_type(EventRetrieveResponse, event, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): - await async_client.fine_tuning.jobs.events.with_raw_response.retrieve( - fine_tuning_job_id="", - ) diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py deleted file mode 100644 index f0014f09..00000000 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ /dev/null @@ -1,437 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.fine_tuning import (
-    FineTuningJob,
-    JobListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestJobs:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-            hyperparameters={
-                "batch_size": "auto",
-                "learning_rate_multiplier": "auto",
-                "n_epochs": "auto",
-            },
-            integrations=[
-                {
-                    "type": "wandb",
-                    "wandb": {
-                        "project": "my-wandb-project",
-                        "entity": "entity",
-                        "name": "name",
-                        "tags": ["custom-tag"],
-                    },
-                }
-            ],
-            metadata={"foo": "string"},
-            method={
-                "dpo": {
-                    "hyperparameters": {
-                        "batch_size": "auto",
-                        "beta": "auto",
-                        "learning_rate_multiplier": "auto",
-                        "n_epochs": "auto",
-                    }
-                },
-                "supervised": {
-                    "hyperparameters": {
-                        "batch_size": "auto",
-                        "learning_rate_multiplier": "auto",
-                        "n_epochs": "auto",
-                    }
-                },
-                "type": "supervised",
-            },
-            seed=42,
-            suffix="x",
-            validation_file="file-abc123",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.fine_tuning.jobs.with_raw_response.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.fine_tuning.jobs.with_streaming_response.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.fine_tuning.jobs.with_raw_response.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.fine_tuning.jobs.with_streaming_response.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
-            client.fine_tuning.jobs.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.list()
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.list(
-            after="after",
-            limit=0,
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.fine_tuning.jobs.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = response.parse()
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.fine_tuning.jobs.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = response.parse()
-            assert_matches_type(JobListResponse, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        job = client.fine_tuning.jobs.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.fine_tuning.jobs.with_raw_response.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.fine_tuning.jobs.with_streaming_response.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
-            client.fine_tuning.jobs.with_raw_response.cancel(
-                "",
-            )
-
-
-class TestAsyncJobs:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-            hyperparameters={
-                "batch_size": "auto",
-                "learning_rate_multiplier": "auto",
-                "n_epochs": "auto",
-            },
-            integrations=[
-                {
-                    "type": "wandb",
-                    "wandb": {
-                        "project": "my-wandb-project",
-                        "entity": "entity",
-                        "name": "name",
-                        "tags": ["custom-tag"],
-                    },
-                }
-            ],
-            metadata={"foo": "string"},
-            method={
-                "dpo": {
-                    "hyperparameters": {
-                        "batch_size": "auto",
-                        "beta": "auto",
-                        "learning_rate_multiplier": "auto",
-                        "n_epochs": "auto",
-                    }
-                },
-                "supervised": {
-                    "hyperparameters": {
-                        "batch_size": "auto",
-                        "learning_rate_multiplier": "auto",
-                        "n_epochs": "auto",
-                    }
-                },
-                "type": "supervised",
-            },
-            seed=42,
-            suffix="x",
-            validation_file="file-abc123",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.fine_tuning.jobs.with_raw_response.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = await response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.fine_tuning.jobs.with_streaming_response.create(
-            model="gpt-4o-mini",
-            training_file="file-abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = await response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.fine_tuning.jobs.with_raw_response.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = await response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.fine_tuning.jobs.with_streaming_response.retrieve(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = await response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
-            await async_client.fine_tuning.jobs.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.list()
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.list(
-            after="after",
-            limit=0,
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.fine_tuning.jobs.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = await response.parse()
-        assert_matches_type(JobListResponse, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.fine_tuning.jobs.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = await response.parse()
-            assert_matches_type(JobListResponse, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        job = await async_client.fine_tuning.jobs.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.fine_tuning.jobs.with_raw_response.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        job = await response.parse()
-        assert_matches_type(FineTuningJob, job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.fine_tuning.jobs.with_streaming_response.cancel(
-            "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            job = await response.parse()
-            assert_matches_type(FineTuningJob, job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
-            await async_client.fine_tuning.jobs.with_raw_response.cancel(
-                "",
-            )
diff --git a/tests/api_resources/organization/__init__.py b/tests/api_resources/organization/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/organization/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
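The suites deleted above all drive each endpoint through the same three access paths: the plain method call, `.with_raw_response` (which exposes the underlying HTTP response and defers model parsing to `.parse()`), and `.with_streaming_response` (a context manager whose body is only consumed when `.parse()` is called). A minimal sketch of that pattern outside the test harness, against the prism mock server started by `scripts/mock`; the `api_key` constructor argument and the running mock server are assumptions, not something this patch confirms:

    import os

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK

    # Point the client at the same mock server the tests use (assumed signature).
    client = DigitaloceanGenaiSDK(
        api_key="My API Key",
        base_url=os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010"),
    )

    # 1. Plain call: returns the parsed FineTuningJob model directly.
    job = client.fine_tuning.jobs.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

    # 2. Raw access: the wrapper exposes the HTTP exchange; .parse() yields the model.
    raw = client.fine_tuning.jobs.with_raw_response.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
    assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
    job = raw.parse()

    # 3. Streaming access: a context manager; the body is not read until .parse().
    with client.fine_tuning.jobs.with_streaming_response.retrieve(
        "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
    ) as streamed:
        job = streamed.parse()

The `X-Stainless-Lang` assertion repeated throughout the deleted tests checks only that the generated client stamps its platform headers on every request.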
diff --git a/tests/api_resources/organization/projects/__init__.py b/tests/api_resources/organization/projects/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/organization/projects/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/organization/projects/test_api_keys.py b/tests/api_resources/organization/projects/test_api_keys.py deleted file mode 100644 index d8c6bbc0..00000000 --- a/tests/api_resources/organization/projects/test_api_keys.py +++ /dev/null @@ -1,338 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.organization.projects import ( - APIKey, - APIKeyListResponse, - APIKeyDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.organization.projects.api_keys.retrieve( - key_id="key_id", - project_id="project_id", - ) - assert_matches_type(APIKey, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="key_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKey, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.api_keys.with_streaming_response.retrieve( - key_id="key_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKey, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="key_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): - client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.organization.projects.api_keys.list( - project_id="project_id", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.organization.projects.api_keys.list( - project_id="project_id", - after="after", - limit=0, - ) - 
assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.api_keys.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.api_keys.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.api_keys.with_raw_response.list( - project_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.organization.projects.api_keys.delete( - key_id="key_id", - project_id="project_id", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.api_keys.with_raw_response.delete( - key_id="key_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.api_keys.with_streaming_response.delete( - key_id="key_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.api_keys.with_raw_response.delete( - key_id="key_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): - client.organization.projects.api_keys.with_raw_response.delete( - key_id="", - project_id="project_id", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.organization.projects.api_keys.retrieve( - key_id="key_id", - project_id="project_id", - ) - assert_matches_type(APIKey, api_key, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="key_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKey, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.api_keys.with_streaming_response.retrieve( - key_id="key_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKey, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="key_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): - await async_client.organization.projects.api_keys.with_raw_response.retrieve( - key_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.organization.projects.api_keys.list( - project_id="project_id", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.organization.projects.api_keys.list( - project_id="project_id", - after="after", - limit=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.api_keys.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.api_keys.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.api_keys.with_raw_response.list( - 
project_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.organization.projects.api_keys.delete( - key_id="key_id", - project_id="project_id", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.api_keys.with_raw_response.delete( - key_id="key_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.api_keys.with_streaming_response.delete( - key_id="key_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.api_keys.with_raw_response.delete( - key_id="key_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"): - await async_client.organization.projects.api_keys.with_raw_response.delete( - key_id="", - project_id="project_id", - ) diff --git a/tests/api_resources/organization/projects/test_rate_limits.py b/tests/api_resources/organization/projects/test_rate_limits.py deleted file mode 100644 index 3f7688b4..00000000 --- a/tests/api_resources/organization/projects/test_rate_limits.py +++ /dev/null @@ -1,265 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.organization.projects import ( - RateLimit, - RateLimitListResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRateLimits: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - rate_limit = client.organization.projects.rate_limits.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - rate_limit = client.organization.projects.rate_limits.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - batch_1_day_max_input_tokens=0, - max_audio_megabytes_per_1_minute=0, - max_images_per_1_minute=0, - max_requests_per_1_day=0, - max_requests_per_1_minute=0, - max_tokens_per_1_minute=0, - ) - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rate_limit = response.parse() - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.rate_limits.with_streaming_response.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rate_limit = response.parse() - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="rate_limit_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"): - client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - rate_limit = client.organization.projects.rate_limits.list( - project_id="project_id", - ) - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - rate_limit = client.organization.projects.rate_limits.list( - project_id="project_id", - after="after", - before="before", - limit=0, - ) - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.rate_limits.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rate_limit = response.parse() - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.rate_limits.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rate_limit = response.parse() - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.rate_limits.with_raw_response.list( - project_id="", - ) - - -class TestAsyncRateLimits: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - rate_limit = await async_client.organization.projects.rate_limits.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - rate_limit = await async_client.organization.projects.rate_limits.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - batch_1_day_max_input_tokens=0, - max_audio_megabytes_per_1_minute=0, - max_images_per_1_minute=0, - max_requests_per_1_day=0, - max_requests_per_1_minute=0, - max_tokens_per_1_minute=0, - ) - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rate_limit = await response.parse() - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.rate_limits.with_streaming_response.update( - rate_limit_id="rate_limit_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rate_limit = await response.parse() - assert_matches_type(RateLimit, rate_limit, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but 
received ''"): - await async_client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="rate_limit_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `rate_limit_id` but received ''"): - await async_client.organization.projects.rate_limits.with_raw_response.update( - rate_limit_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - rate_limit = await async_client.organization.projects.rate_limits.list( - project_id="project_id", - ) - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - rate_limit = await async_client.organization.projects.rate_limits.list( - project_id="project_id", - after="after", - before="before", - limit=0, - ) - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.rate_limits.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rate_limit = await response.parse() - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.rate_limits.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rate_limit = await response.parse() - assert_matches_type(RateLimitListResponse, rate_limit, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.rate_limits.with_raw_response.list( - project_id="", - ) diff --git a/tests/api_resources/organization/projects/test_service_accounts.py b/tests/api_resources/organization/projects/test_service_accounts.py deleted file mode 100644 index 4cbdbd38..00000000 --- a/tests/api_resources/organization/projects/test_service_accounts.py +++ /dev/null @@ -1,431 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.organization.projects import ( - ServiceAccount, - ServiceAccountListResponse, - ServiceAccountCreateResponse, - ServiceAccountDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestServiceAccounts: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - service_account = client.organization.projects.service_accounts.create( - project_id="project_id", - name="name", - ) - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.service_accounts.with_raw_response.create( - project_id="project_id", - name="name", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = response.parse() - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.service_accounts.with_streaming_response.create( - project_id="project_id", - name="name", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = response.parse() - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.create( - project_id="", - name="name", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - service_account = client.organization.projects.service_accounts.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = response.parse() - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.service_accounts.with_streaming_response.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
service_account = response.parse() - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="service_account_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - service_account = client.organization.projects.service_accounts.list( - project_id="project_id", - ) - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - service_account = client.organization.projects.service_accounts.list( - project_id="project_id", - after="after", - limit=0, - ) - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.service_accounts.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = response.parse() - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.service_accounts.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = response.parse() - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.list( - project_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - service_account = client.organization.projects.service_accounts.delete( - service_account_id="service_account_id", - project_id="project_id", - ) - assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.projects.service_accounts.with_raw_response.delete( - service_account_id="service_account_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = response.parse() - assert_matches_type(ServiceAccountDeleteResponse, service_account, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.projects.service_accounts.with_streaming_response.delete( - service_account_id="service_account_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = response.parse() - assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.delete( - service_account_id="service_account_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): - client.organization.projects.service_accounts.with_raw_response.delete( - service_account_id="", - project_id="project_id", - ) - - -class TestAsyncServiceAccounts: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - service_account = await async_client.organization.projects.service_accounts.create( - project_id="project_id", - name="name", - ) - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.service_accounts.with_raw_response.create( - project_id="project_id", - name="name", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = await response.parse() - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.service_accounts.with_streaming_response.create( - project_id="project_id", - name="name", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = await response.parse() - assert_matches_type(ServiceAccountCreateResponse, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.service_accounts.with_raw_response.create( - project_id="", - name="name", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - service_account = await async_client.organization.projects.service_accounts.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - 
async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = await response.parse() - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.service_accounts.with_streaming_response.retrieve( - service_account_id="service_account_id", - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = await response.parse() - assert_matches_type(ServiceAccount, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="service_account_id", - project_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"): - await async_client.organization.projects.service_accounts.with_raw_response.retrieve( - service_account_id="", - project_id="project_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - service_account = await async_client.organization.projects.service_accounts.list( - project_id="project_id", - ) - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - service_account = await async_client.organization.projects.service_accounts.list( - project_id="project_id", - after="after", - limit=0, - ) - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.service_accounts.with_raw_response.list( - project_id="project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - service_account = await response.parse() - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.service_accounts.with_streaming_response.list( - project_id="project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - service_account = await response.parse() - assert_matches_type(ServiceAccountListResponse, service_account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def 
-    async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.service_accounts.with_raw_response.list(
-                project_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        service_account = await async_client.organization.projects.service_accounts.delete(
-            service_account_id="service_account_id",
-            project_id="project_id",
-        )
-        assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.service_accounts.with_raw_response.delete(
-            service_account_id="service_account_id",
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        service_account = await response.parse()
-        assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.service_accounts.with_streaming_response.delete(
-            service_account_id="service_account_id",
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            service_account = await response.parse()
-            assert_matches_type(ServiceAccountDeleteResponse, service_account, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.service_accounts.with_raw_response.delete(
-                service_account_id="service_account_id",
-                project_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `service_account_id` but received ''"):
-            await async_client.organization.projects.service_accounts.with_raw_response.delete(
-                service_account_id="",
-                project_id="project_id",
-            )
diff --git a/tests/api_resources/organization/projects/test_users.py b/tests/api_resources/organization/projects/test_users.py
deleted file mode 100644
index df2a136e..00000000
--- a/tests/api_resources/organization/projects/test_users.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization.projects import (
-    ProjectUser,
-    UserListResponse,
-    UserDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUsers:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.users.with_raw_response.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.users.with_streaming_response.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.users.with_raw_response.retrieve(
-                user_id="user_id",
-                project_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            client.organization.projects.users.with_raw_response.retrieve(
-                user_id="",
-                project_id="project_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.users.with_raw_response.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.users.with_streaming_response.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.users.with_raw_response.update(
-                user_id="user_id",
-                project_id="",
-                role="owner",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            client.organization.projects.users.with_raw_response.update(
-                user_id="",
-                project_id="project_id",
-                role="owner",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.list(
-            project_id="project_id",
-        )
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.list(
-            project_id="project_id",
-            after="after",
-            limit=0,
-        )
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.users.with_raw_response.list(
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = response.parse()
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.users.with_streaming_response.list(
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = response.parse()
-            assert_matches_type(UserListResponse, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.users.with_raw_response.list(
-                project_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.delete(
-            user_id="user_id",
-            project_id="project_id",
-        )
-        assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.users.with_raw_response.delete(
-            user_id="user_id",
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = response.parse()
-        assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.users.with_streaming_response.delete(
-            user_id="user_id",
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = response.parse()
-            assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.users.with_raw_response.delete(
-                user_id="user_id",
-                project_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            client.organization.projects.users.with_raw_response.delete(
-                user_id="",
-                project_id="project_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_add(self, client: DigitaloceanGenaiSDK) -> None:
-        user = client.organization.projects.users.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.users.with_raw_response.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.users.with_streaming_response.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.users.with_raw_response.add(
-                project_id="",
-                role="owner",
-                user_id="user_id",
-            )
-
-
-class TestAsyncUsers:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.users.with_raw_response.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = await response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.users.with_streaming_response.retrieve(
-            user_id="user_id",
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = await response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.retrieve(
-                user_id="user_id",
-                project_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.retrieve(
-                user_id="",
-                project_id="project_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.users.with_raw_response.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = await response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.users.with_streaming_response.update(
-            user_id="user_id",
-            project_id="project_id",
-            role="owner",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = await response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.update(
-                user_id="user_id",
-                project_id="",
-                role="owner",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.update(
-                user_id="",
-                project_id="project_id",
-                role="owner",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.list(
-            project_id="project_id",
-        )
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.list(
-            project_id="project_id",
-            after="after",
-            limit=0,
-        )
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.users.with_raw_response.list(
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = await response.parse()
-        assert_matches_type(UserListResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.users.with_streaming_response.list(
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = await response.parse()
-            assert_matches_type(UserListResponse, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.list(
-                project_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.delete(
-            user_id="user_id",
-            project_id="project_id",
-        )
-        assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.users.with_raw_response.delete(
-            user_id="user_id",
-            project_id="project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = await response.parse()
-        assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.users.with_streaming_response.delete(
-            user_id="user_id",
-            project_id="project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = await response.parse()
-            assert_matches_type(UserDeleteResponse, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.delete(
-                user_id="user_id",
-                project_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.delete(
-                user_id="",
-                project_id="project_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        user = await async_client.organization.projects.users.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        )
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.users.with_raw_response.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        user = await response.parse()
-        assert_matches_type(ProjectUser, user, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.users.with_streaming_response.add(
-            project_id="project_id",
-            role="owner",
-            user_id="user_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            user = await response.parse()
-            assert_matches_type(ProjectUser, user, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.users.with_raw_response.add(
-                project_id="",
-                role="owner",
-                user_id="user_id",
-            )
diff --git a/tests/api_resources/organization/test_admin_api_keys.py b/tests/api_resources/organization/test_admin_api_keys.py
deleted file mode 100644
index 0e0949a1..00000000
--- a/tests/api_resources/organization/test_admin_api_keys.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
-    AdminAPIKey,
-    AdminAPIKeyListResponse,
-    AdminAPIKeyDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAdminAPIKeys:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        admin_api_key = client.organization.admin_api_keys.create(
-            name="New Admin Key",
-        )
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.admin_api_keys.with_raw_response.create(
-            name="New Admin Key",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = response.parse()
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.admin_api_keys.with_streaming_response.create(
-            name="New Admin Key",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = response.parse()
-            assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        admin_api_key = client.organization.admin_api_keys.retrieve(
-            "key_id",
-        )
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.admin_api_keys.with_raw_response.retrieve(
-            "key_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = response.parse()
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.admin_api_keys.with_streaming_response.retrieve(
-            "key_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = response.parse()
-            assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
-            client.organization.admin_api_keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        admin_api_key = client.organization.admin_api_keys.list()
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        admin_api_key = client.organization.admin_api_keys.list(
-            after="after",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.admin_api_keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = response.parse()
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.admin_api_keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = response.parse()
-            assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        admin_api_key = client.organization.admin_api_keys.delete(
-            "key_id",
-        )
-        assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.admin_api_keys.with_raw_response.delete(
-            "key_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = response.parse()
-        assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.admin_api_keys.with_streaming_response.delete(
-            "key_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = response.parse()
-            assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
-            client.organization.admin_api_keys.with_raw_response.delete(
-                "",
-            )
-
-
-class TestAsyncAdminAPIKeys:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        admin_api_key = await async_client.organization.admin_api_keys.create(
-            name="New Admin Key",
-        )
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.admin_api_keys.with_raw_response.create(
-            name="New Admin Key",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = await response.parse()
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.admin_api_keys.with_streaming_response.create(
-            name="New Admin Key",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = await response.parse()
-            assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        admin_api_key = await async_client.organization.admin_api_keys.retrieve(
-            "key_id",
-        )
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.admin_api_keys.with_raw_response.retrieve(
-            "key_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = await response.parse()
-        assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.admin_api_keys.with_streaming_response.retrieve(
-            "key_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = await response.parse()
-            assert_matches_type(AdminAPIKey, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
-            await async_client.organization.admin_api_keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        admin_api_key = await async_client.organization.admin_api_keys.list()
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        admin_api_key = await async_client.organization.admin_api_keys.list(
-            after="after",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.admin_api_keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = await response.parse()
-        assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.admin_api_keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = await response.parse()
-            assert_matches_type(AdminAPIKeyListResponse, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        admin_api_key = await async_client.organization.admin_api_keys.delete(
-            "key_id",
-        )
-        assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.admin_api_keys.with_raw_response.delete(
-            "key_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        admin_api_key = await response.parse()
-        assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.admin_api_keys.with_streaming_response.delete(
-            "key_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            admin_api_key = await response.parse()
-            assert_matches_type(AdminAPIKeyDeleteResponse, admin_api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `key_id` but received ''"):
-            await async_client.organization.admin_api_keys.with_raw_response.delete(
-                "",
-            )
diff --git a/tests/api_resources/organization/test_invites.py b/tests/api_resources/organization/test_invites.py
deleted file mode 100644
index 73528d26..00000000
--- a/tests/api_resources/organization/test_invites.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
-    Invite,
-    InviteListResponse,
-    InviteDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestInvites:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.create(
-            email="email",
-            role="reader",
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.create(
-            email="email",
-            role="reader",
-            projects=[
-                {
-                    "id": "id",
-                    "role": "member",
-                }
-            ],
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.invites.with_raw_response.create(
-            email="email",
-            role="reader",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = response.parse()
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.invites.with_streaming_response.create(
-            email="email",
-            role="reader",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = response.parse()
-            assert_matches_type(Invite, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.retrieve(
-            "invite_id",
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.invites.with_raw_response.retrieve(
-            "invite_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = response.parse()
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.invites.with_streaming_response.retrieve(
-            "invite_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = response.parse()
-            assert_matches_type(Invite, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
-            client.organization.invites.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.list()
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.list(
-            after="after",
-            limit=0,
-        )
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.invites.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = response.parse()
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.invites.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = response.parse()
-            assert_matches_type(InviteListResponse, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        invite = client.organization.invites.delete(
-            "invite_id",
-        )
-        assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.invites.with_raw_response.delete(
-            "invite_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = response.parse()
-        assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.invites.with_streaming_response.delete(
-            "invite_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = response.parse()
-            assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
-            client.organization.invites.with_raw_response.delete(
-                "",
-            )
-
-
-class TestAsyncInvites:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.create(
-            email="email",
-            role="reader",
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.create(
-            email="email",
-            role="reader",
-            projects=[
-                {
-                    "id": "id",
-                    "role": "member",
-                }
-            ],
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.invites.with_raw_response.create(
-            email="email",
-            role="reader",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = await response.parse()
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.invites.with_streaming_response.create(
-            email="email",
-            role="reader",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = await response.parse()
-            assert_matches_type(Invite, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.retrieve(
-            "invite_id",
-        )
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.invites.with_raw_response.retrieve(
-            "invite_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = await response.parse()
-        assert_matches_type(Invite, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.invites.with_streaming_response.retrieve(
-            "invite_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = await response.parse()
-            assert_matches_type(Invite, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
-            await async_client.organization.invites.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.list()
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.list(
-            after="after",
-            limit=0,
-        )
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.invites.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = await response.parse()
-        assert_matches_type(InviteListResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.invites.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = await response.parse()
-            assert_matches_type(InviteListResponse, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        invite = await async_client.organization.invites.delete(
-            "invite_id",
-        )
-        assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.invites.with_raw_response.delete(
-            "invite_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        invite = await response.parse()
-        assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.invites.with_streaming_response.delete(
-            "invite_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            invite = await response.parse()
-            assert_matches_type(InviteDeleteResponse, invite, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `invite_id` but received ''"):
-            await async_client.organization.invites.with_raw_response.delete(
-                "",
-            )
diff --git a/tests/api_resources/organization/test_projects.py b/tests/api_resources/organization/test_projects.py
deleted file mode 100644
index 6b9dd9a4..00000000
--- a/tests/api_resources/organization/test_projects.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.organization import (
-    Project,
-    ProjectListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestProjects:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.create(
-            name="name",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.with_raw_response.create(
-            name="name",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.with_streaming_response.create(
-            name="name",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.retrieve(
-            "project_id",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.with_raw_response.retrieve(
-            "project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.with_streaming_response.retrieve(
-            "project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.update(
-            project_id="project_id",
-            name="name",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.with_raw_response.update(
-            project_id="project_id",
-            name="name",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.with_streaming_response.update(
-            project_id="project_id",
-            name="name",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.with_raw_response.update(
-                project_id="",
-                name="name",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.list()
-        assert_matches_type(ProjectListResponse, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.list(
-            after="after",
-            include_archived=True,
-            limit=0,
-        )
-        assert_matches_type(ProjectListResponse, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = response.parse()
-        assert_matches_type(ProjectListResponse, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = response.parse()
-            assert_matches_type(ProjectListResponse, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_archive(self, client: DigitaloceanGenaiSDK) -> None:
-        project = client.organization.projects.archive(
-            "project_id",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.projects.with_raw_response.archive(
-            "project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_archive(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.projects.with_streaming_response.archive(
-            "project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_archive(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            client.organization.projects.with_raw_response.archive(
-                "",
-            )
-
-
-class TestAsyncProjects:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        project = await async_client.organization.projects.create(
-            name="name",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.with_raw_response.create(
-            name="name",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = await response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.with_streaming_response.create(
-            name="name",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = await response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        project = await async_client.organization.projects.retrieve(
-            "project_id",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.projects.with_raw_response.retrieve(
-            "project_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        project = await response.parse()
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.projects.with_streaming_response.retrieve(
-            "project_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            project = await response.parse()
-            assert_matches_type(Project, project, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
-            await async_client.organization.projects.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        project = await async_client.organization.projects.update(
-            project_id="project_id",
-            name="name",
-        )
-        assert_matches_type(Project, project, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
response = await async_client.organization.projects.with_raw_response.update( - project_id="project_id", - name="name", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - project = await response.parse() - assert_matches_type(Project, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.with_streaming_response.update( - project_id="project_id", - name="name", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - project = await response.parse() - assert_matches_type(Project, project, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.with_raw_response.update( - project_id="", - name="name", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - project = await async_client.organization.projects.list() - assert_matches_type(ProjectListResponse, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - project = await async_client.organization.projects.list( - after="after", - include_archived=True, - limit=0, - ) - assert_matches_type(ProjectListResponse, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - project = await response.parse() - assert_matches_type(ProjectListResponse, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.projects.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - project = await response.parse() - assert_matches_type(ProjectListResponse, project, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - project = await async_client.organization.projects.archive( - "project_id", - ) - assert_matches_type(Project, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.projects.with_raw_response.archive( - "project_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - project = await response.parse() - assert_matches_type(Project, project, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - 
async with async_client.organization.projects.with_streaming_response.archive( - "project_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - project = await response.parse() - assert_matches_type(Project, project, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_archive(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.organization.projects.with_raw_response.archive( - "", - ) diff --git a/tests/api_resources/organization/test_usage.py b/tests/api_resources/organization/test_usage.py deleted file mode 100644 index 198f2159..00000000 --- a/tests/api_resources/organization/test_usage.py +++ /dev/null @@ -1,834 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import UsageResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestUsage: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.audio_speeches( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_audio_speeches_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.audio_speeches( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.audio_speeches( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_audio_speeches(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.audio_speeches( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.audio_transcriptions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_audio_transcriptions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = 
client.organization.usage.audio_transcriptions( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.audio_transcriptions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_audio_transcriptions(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.audio_transcriptions( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.code_interpreter_sessions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_code_interpreter_sessions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.code_interpreter_sessions( - start_time=0, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - page="page", - project_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.code_interpreter_sessions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_code_interpreter_sessions(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.code_interpreter_sessions( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_completions(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.completions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_completions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.completions( - start_time=0, - api_key_ids=["string"], - batch=True, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_completions(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.completions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_completions(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.completions( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_embeddings(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.embeddings( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_embeddings_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.embeddings( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.embeddings( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_embeddings(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.embeddings( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_images(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.images( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_images_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.images( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - sizes=["256x256"], - sources=["image.generation"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_images(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.images( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - 
assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_images(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.images( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_moderations(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.moderations( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_moderations_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.moderations( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_moderations(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.moderations( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_moderations(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.moderations( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.vector_stores( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_vector_stores_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - usage = client.organization.usage.vector_stores( - start_time=0, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - page="page", - project_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.usage.with_raw_response.vector_stores( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_vector_stores(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.usage.with_streaming_response.vector_stores( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = response.parse() - 
assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncUsage: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.audio_speeches( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_audio_speeches_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.audio_speeches( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.audio_speeches( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_audio_speeches(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.audio_speeches( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.audio_transcriptions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_audio_transcriptions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.audio_transcriptions( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.audio_transcriptions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_audio_transcriptions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.audio_transcriptions( - start_time=0, - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.code_interpreter_sessions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_code_interpreter_sessions_with_all_params( - self, async_client: AsyncDigitaloceanGenaiSDK - ) -> None: - usage = await async_client.organization.usage.code_interpreter_sessions( - start_time=0, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - page="page", - project_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.code_interpreter_sessions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_code_interpreter_sessions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.code_interpreter_sessions( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.completions( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_completions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.completions( - start_time=0, - api_key_ids=["string"], - batch=True, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.completions( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_completions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.completions( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.embeddings( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_embeddings_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.embeddings( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.embeddings( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_embeddings(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.embeddings( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.images( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_images_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.images( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - sizes=["256x256"], - sources=["image.generation"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.images( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_images(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.images( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, 
response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.moderations( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_moderations_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.moderations( - start_time=0, - api_key_ids=["string"], - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - models=["string"], - page="page", - project_ids=["string"], - user_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.moderations( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_moderations(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.moderations( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.vector_stores( - start_time=0, - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_vector_stores_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - usage = await async_client.organization.usage.vector_stores( - start_time=0, - bucket_width="1m", - end_time=0, - group_by=["project_id"], - limit=0, - page="page", - project_ids=["string"], - ) - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.usage.with_raw_response.vector_stores( - start_time=0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_vector_stores(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.usage.with_streaming_response.vector_stores( - start_time=0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - usage = await response.parse() - assert_matches_type(UsageResponse, usage, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/organization/test_users.py b/tests/api_resources/organization/test_users.py deleted file mode 100644 index b40fcbef..00000000 
--- a/tests/api_resources/organization/test_users.py +++ /dev/null @@ -1,362 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.organization import ( - OrganizationUser, - UserListResponse, - UserDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestUsers: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - user = client.organization.users.retrieve( - "user_id", - ) - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.users.with_raw_response.retrieve( - "user_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.users.with_streaming_response.retrieve( - "user_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - client.organization.users.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - user = client.organization.users.update( - user_id="user_id", - role="owner", - ) - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.users.with_raw_response.update( - user_id="user_id", - role="owner", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.users.with_streaming_response.update( - user_id="user_id", - role="owner", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - 
client.organization.users.with_raw_response.update( - user_id="", - role="owner", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - user = client.organization.users.list() - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - user = client.organization.users.list( - after="after", - emails=["string"], - limit=0, - ) - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.users.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = response.parse() - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.users.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = response.parse() - assert_matches_type(UserListResponse, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - user = client.organization.users.delete( - "user_id", - ) - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.organization.users.with_raw_response.delete( - "user_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = response.parse() - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.organization.users.with_streaming_response.delete( - "user_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = response.parse() - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - client.organization.users.with_raw_response.delete( - "", - ) - - -class TestAsyncUsers: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - user = await async_client.organization.users.retrieve( - "user_id", - ) - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.users.with_raw_response.retrieve( - "user_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = 
await response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.users.with_streaming_response.retrieve( - "user_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = await response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - await async_client.organization.users.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - user = await async_client.organization.users.update( - user_id="user_id", - role="owner", - ) - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.users.with_raw_response.update( - user_id="user_id", - role="owner", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = await response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.users.with_streaming_response.update( - user_id="user_id", - role="owner", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = await response.parse() - assert_matches_type(OrganizationUser, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - await async_client.organization.users.with_raw_response.update( - user_id="", - role="owner", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - user = await async_client.organization.users.list() - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - user = await async_client.organization.users.list( - after="after", - emails=["string"], - limit=0, - ) - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.users.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = await response.parse() - assert_matches_type(UserListResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.users.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = await response.parse() - assert_matches_type(UserListResponse, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - user = await async_client.organization.users.delete( - "user_id", - ) - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.organization.users.with_raw_response.delete( - "user_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - user = await response.parse() - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.organization.users.with_streaming_response.delete( - "user_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - user = await response.parse() - assert_matches_type(UserDeleteResponse, user, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `user_id` but received ''"): - await async_client.organization.users.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_assistants.py b/tests/api_resources/test_assistants.py deleted file mode 100644 index a5fa998d..00000000 --- a/tests/api_resources/test_assistants.py +++ /dev/null @@ -1,528 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - AssistantObject, - AssistantListResponse, - AssistantDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAssistants: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.create( - model="gpt-4o", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.create( - model="gpt-4o", - description="description", - instructions="instructions", - metadata={"foo": "string"}, - name="name", - reasoning_effort="low", - response_format="auto", - temperature=1, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.assistants.with_raw_response.create( - model="gpt-4o", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.assistants.with_streaming_response.create( - model="gpt-4o", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.retrieve( - "assistant_id", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.assistants.with_raw_response.retrieve( - "assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.assistants.with_streaming_response.retrieve( - "assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - 
@pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - client.assistants.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.update( - assistant_id="assistant_id", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.update( - assistant_id="assistant_id", - description="description", - instructions="instructions", - metadata={"foo": "string"}, - model="string", - name="name", - reasoning_effort="low", - response_format="auto", - temperature=1, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": {"vector_store_ids": ["string"]}, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.assistants.with_raw_response.update( - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.assistants.with_streaming_response.update( - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - client.assistants.with_raw_response.update( - assistant_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.list() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.list( - after="after", - before="before", - limit=0, - order="asc", - ) - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.assistants.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = response.parse() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.assistants.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - assistant = response.parse() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - assistant = client.assistants.delete( - "assistant_id", - ) - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.assistants.with_raw_response.delete( - "assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = response.parse() - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.assistants.with_streaming_response.delete( - "assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = response.parse() - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - client.assistants.with_raw_response.delete( - "", - ) - - -class TestAsyncAssistants: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.create( - model="gpt-4o", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.create( - model="gpt-4o", - description="description", - instructions="instructions", - metadata={"foo": "string"}, - name="name", - reasoning_effort="low", - response_format="auto", - temperature=1, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.assistants.with_raw_response.create( - model="gpt-4o", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = await response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.assistants.with_streaming_response.create( - model="gpt-4o", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - assistant = await response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.retrieve( - "assistant_id", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.assistants.with_raw_response.retrieve( - "assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = await response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.assistants.with_streaming_response.retrieve( - "assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = await response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - await async_client.assistants.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.update( - assistant_id="assistant_id", - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.update( - assistant_id="assistant_id", - description="description", - instructions="instructions", - metadata={"foo": "string"}, - model="string", - name="name", - reasoning_effort="low", - response_format="auto", - temperature=1, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": {"vector_store_ids": ["string"]}, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - ) - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.assistants.with_raw_response.update( - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = await response.parse() - assert_matches_type(AssistantObject, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.assistants.with_streaming_response.update( - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = await response.parse() - assert_matches_type(AssistantObject, 
assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - await async_client.assistants.with_raw_response.update( - assistant_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.list() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.list( - after="after", - before="before", - limit=0, - order="asc", - ) - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.assistants.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = await response.parse() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.assistants.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = await response.parse() - assert_matches_type(AssistantListResponse, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - assistant = await async_client.assistants.delete( - "assistant_id", - ) - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.assistants.with_raw_response.delete( - "assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - assistant = await response.parse() - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.assistants.with_streaming_response.delete( - "assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - assistant = await response.parse() - assert_matches_type(AssistantDeleteResponse, assistant, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): - await async_client.assistants.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_audio.py b/tests/api_resources/test_audio.py deleted file mode 100644 index 
e71d568e..00000000 --- a/tests/api_resources/test_audio.py +++ /dev/null @@ -1,383 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import httpx -import pytest -from respx import MockRouter - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - AudioTranslateAudioResponse, - AudioTranscribeAudioResponse, -) -from digitalocean_genai_sdk._response import ( - BinaryAPIResponse, - AsyncBinaryAPIResponse, - StreamedBinaryAPIResponse, - AsyncStreamedBinaryAPIResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAudio: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - audio = client.audio.generate_speech( - input="input", - model="string", - voice="ash", - ) - assert audio.is_closed - assert audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, BinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_method_generate_speech_with_all_params(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - audio = client.audio.generate_speech( - input="input", - model="string", - voice="ash", - instructions="instructions", - response_format="mp3", - speed=0.25, - ) - assert audio.is_closed - assert audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, BinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_raw_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - - audio = client.audio.with_raw_response.generate_speech( - input="input", - model="string", - voice="ash", - ) - - assert audio.is_closed is True - assert audio.http_request.headers.get("X-Stainless-Lang") == "python" - assert audio.json() == {"foo": "bar"} - assert isinstance(audio, BinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - def test_streaming_response_generate_speech(self, client: DigitaloceanGenaiSDK, respx_mock: MockRouter) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - with client.audio.with_streaming_response.generate_speech( - input="input", - model="string", - voice="ash", - ) as audio: - assert not audio.is_closed - assert audio.http_request.headers.get("X-Stainless-Lang") == "python" - - assert audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, StreamedBinaryAPIResponse) - - assert cast(Any, audio.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: - audio = client.audio.transcribe_audio( - file=b"raw file contents", - 
model="gpt-4o-transcribe", - ) - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_transcribe_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - audio = client.audio.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - include=["logprobs"], - language="language", - prompt="prompt", - response_format="json", - stream=True, - temperature=0, - timestamp_granularities=["word"], - ) - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: - response = client.audio.with_raw_response.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - audio = response.parse() - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_transcribe_audio(self, client: DigitaloceanGenaiSDK) -> None: - with client.audio.with_streaming_response.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - audio = response.parse() - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: - audio = client.audio.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_translate_audio_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - audio = client.audio.translate_audio( - file=b"raw file contents", - model="whisper-1", - prompt="prompt", - response_format="json", - temperature=0, - ) - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: - response = client.audio.with_raw_response.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - audio = response.parse() - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_translate_audio(self, client: DigitaloceanGenaiSDK) -> None: - with client.audio.with_streaming_response.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - audio = response.parse() - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncAudio: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_generate_speech( - self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter - ) -> 
None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - audio = await async_client.audio.generate_speech( - input="input", - model="string", - voice="ash", - ) - assert audio.is_closed - assert await audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, AsyncBinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_method_generate_speech_with_all_params( - self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter - ) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - audio = await async_client.audio.generate_speech( - input="input", - model="string", - voice="ash", - instructions="instructions", - response_format="mp3", - speed=0.25, - ) - assert audio.is_closed - assert await audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, AsyncBinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_raw_response_generate_speech( - self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter - ) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - - audio = await async_client.audio.with_raw_response.generate_speech( - input="input", - model="string", - voice="ash", - ) - - assert audio.is_closed is True - assert audio.http_request.headers.get("X-Stainless-Lang") == "python" - assert await audio.json() == {"foo": "bar"} - assert isinstance(audio, AsyncBinaryAPIResponse) - - @pytest.mark.skip() - @parametrize - @pytest.mark.respx(base_url=base_url) - async def test_streaming_response_generate_speech( - self, async_client: AsyncDigitaloceanGenaiSDK, respx_mock: MockRouter - ) -> None: - respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - async with async_client.audio.with_streaming_response.generate_speech( - input="input", - model="string", - voice="ash", - ) as audio: - assert not audio.is_closed - assert audio.http_request.headers.get("X-Stainless-Lang") == "python" - - assert await audio.json() == {"foo": "bar"} - assert cast(Any, audio.is_closed) is True - assert isinstance(audio, AsyncStreamedBinaryAPIResponse) - - assert cast(Any, audio.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - audio = await async_client.audio.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - ) - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_transcribe_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - audio = await async_client.audio.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - include=["logprobs"], - language="language", - prompt="prompt", - response_format="json", - stream=True, - temperature=0, - timestamp_granularities=["word"], - ) - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.audio.with_raw_response.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - ) - - assert 
response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - audio = await response.parse() - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_transcribe_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.audio.with_streaming_response.transcribe_audio( - file=b"raw file contents", - model="gpt-4o-transcribe", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - audio = await response.parse() - assert_matches_type(AudioTranscribeAudioResponse, audio, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - audio = await async_client.audio.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_translate_audio_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - audio = await async_client.audio.translate_audio( - file=b"raw file contents", - model="whisper-1", - prompt="prompt", - response_format="json", - temperature=0, - ) - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.audio.with_raw_response.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - audio = await response.parse() - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_translate_audio(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.audio.with_streaming_response.translate_audio( - file=b"raw file contents", - model="whisper-1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - audio = await response.parse() - assert_matches_type(AudioTranslateAudioResponse, audio, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py deleted file mode 100644 index 6ad0bbee..00000000 --- a/tests/api_resources/test_batches.py +++ /dev/null @@ -1,366 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
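Context for the deletions above: every generated resource method in this SDK is callable three ways, directly, via `.with_raw_response` (which exposes the HTTP layer and requires an explicit `.parse()`), and via `.with_streaming_response` (a context manager). A minimal sketch of the same pattern outside the test harness, assuming the client constructor accepts `base_url` and `api_key` keyword arguments as Stainless clients typically do; the method names are taken from the diff itself:

import os

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

# Constructor kwargs are assumed here; this patch does not show the signature.
client = DigitaloceanGenaiSDK(
    base_url=os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010"),
    api_key="my-api-key",
)

# Direct call: returns the parsed response model.
audio = client.audio.translate_audio(file=b"raw file contents", model="whisper-1")

# Raw-response wrapper: status and headers are inspectable; parse() yields the model.
raw = client.audio.with_raw_response.translate_audio(
    file=b"raw file contents",
    model="whisper-1",
)
assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
audio = raw.parse()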
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import Batch, BatchListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestBatches: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - batch = client.batches.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - batch = client.batches.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - metadata={"foo": "string"}, - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.batches.with_raw_response.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.batches.with_streaming_response.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - batch = client.batches.retrieve( - "batch_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.batches.with_raw_response.retrieve( - "batch_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.batches.with_streaming_response.retrieve( - "batch_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.batches.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - batch = 
client.batches.list() - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - batch = client.batches.list( - after="after", - limit=0, - ) - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.batches.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = response.parse() - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.batches.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = response.parse() - assert_matches_type(BatchListResponse, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: - batch = client.batches.cancel( - "batch_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - response = client.batches.with_raw_response.cancel( - "batch_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with client.batches.with_streaming_response.cancel( - "batch_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.batches.with_raw_response.cancel( - "", - ) - - -class TestAsyncBatches: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - metadata={"foo": "string"}, - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.batches.with_raw_response.create( - completion_window="24h", - endpoint="/v1/responses", - 
input_file_id="input_file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.batches.with_streaming_response.create( - completion_window="24h", - endpoint="/v1/responses", - input_file_id="input_file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.retrieve( - "batch_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.batches.with_raw_response.retrieve( - "batch_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.batches.with_streaming_response.retrieve( - "batch_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.batches.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.list() - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.list( - after="after", - limit=0, - ) - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.batches.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = await response.parse() - assert_matches_type(BatchListResponse, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.batches.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = await response.parse() - 
assert_matches_type(BatchListResponse, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - batch = await async_client.batches.cancel( - "batch_id", - ) - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.batches.with_raw_response.cancel( - "batch_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.batches.with_streaming_response.cancel( - "batch_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - batch = await response.parse() - assert_matches_type(Batch, batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.batches.with_raw_response.cancel( - "", - ) diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py deleted file mode 100644 index eb5c1abd..00000000 --- a/tests/api_resources/test_completions.py +++ /dev/null @@ -1,148 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
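The batch tests above walk the full lifecycle: create, retrieve, list, cancel, plus the empty-path-parameter guard. A sketch of the same calls outside the harness, reusing the `client` from the earlier sketch; note that the `.id` attribute on the returned `Batch` model is an assumption, not shown in this patch:

batch = client.batches.create(
    completion_window="24h",
    endpoint="/v1/responses",
    input_file_id="input_file_id",
    metadata={"foo": "string"},  # optional
)
fetched = client.batches.retrieve(batch.id)   # assumed: Batch exposes .id
page = client.batches.list(after="after", limit=20)
cancelled = client.batches.cancel(batch.id)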
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - CompletionCreateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestCompletions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.completions.create( - model="string", - prompt="This is a test.", - ) - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.completions.create( - model="string", - prompt="This is a test.", - best_of=0, - echo=True, - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=0, - max_tokens=16, - n=1, - presence_penalty=-2, - seed=0, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - suffix="test.", - temperature=1, - top_p=1, - user="user-1234", - ) - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.completions.with_raw_response.create( - model="string", - prompt="This is a test.", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.completions.with_streaming_response.create( - model="string", - prompt="This is a test.", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = response.parse() - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncCompletions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.completions.create( - model="string", - prompt="This is a test.", - ) - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.completions.create( - model="string", - prompt="This is a test.", - best_of=0, - echo=True, - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=0, - max_tokens=16, - n=1, - presence_penalty=-2, - seed=0, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - suffix="test.", - temperature=1, - top_p=1, - user="user-1234", - ) - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await 
async_client.completions.with_raw_response.create( - model="string", - prompt="This is a test.", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.completions.with_streaming_response.create( - model="string", - prompt="This is a test.", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - completion = await response.parse() - assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py index bd3ef322..ea1b5879 100644 --- a/tests/api_resources/test_embeddings.py +++ b/tests/api_resources/test_embeddings.py @@ -32,8 +32,6 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No embedding = client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", - dimensions=1, - encoding_format="float", user="user-1234", ) assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) @@ -85,8 +83,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce embedding = await async_client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", - dimensions=1, - encoding_format="float", user="user-1234", ) assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py deleted file mode 100644 index b30ae859..00000000 --- a/tests/api_resources/test_files.py +++ /dev/null @@ -1,430 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
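The embeddings hunk above narrows the all-params test: `dimensions` and `encoding_format` are dropped, leaving `input`, `model`, and `user`. Callers written against the old surface would slim down to something like this sketch, with the same assumed `client` as in the earlier example:

# dimensions= and encoding_format= are no longer exercised by the tests,
# which suggests the generated method no longer accepts them.
embedding = client.embeddings.create(
    input="The quick brown fox jumped over the lazy dog",
    model="text-embedding-3-small",
    user="user-1234",
)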
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - OpenAIFile, - FileListResponse, - FileDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFiles: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.retrieve( - "file_id", - ) - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.files.with_raw_response.retrieve( - "file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.files.with_streaming_response.retrieve( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.files.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.list() - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.list( - after="after", - limit=0, - order="asc", - purpose="purpose", - ) - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.files.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.files.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(FileListResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.delete( - "file_id", - ) - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.files.with_raw_response.delete( - "file_id", - ) - - 
assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.files.with_streaming_response.delete( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.files.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.retrieve_content( - "file_id", - ) - assert_matches_type(str, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - response = client.files.with_raw_response.retrieve_content( - "file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(str, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - with client.files.with_streaming_response.retrieve_content( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(str, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.files.with_raw_response.retrieve_content( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_upload(self, client: DigitaloceanGenaiSDK) -> None: - file = client.files.upload( - file=b"raw file contents", - purpose="assistants", - ) - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_upload(self, client: DigitaloceanGenaiSDK) -> None: - response = client.files.with_raw_response.upload( - file=b"raw file contents", - purpose="assistants", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_upload(self, client: DigitaloceanGenaiSDK) -> None: - with client.files.with_streaming_response.upload( - file=b"raw file contents", - purpose="assistants", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncFiles: - parametrize = pytest.mark.parametrize("async_client", [False, True], 
indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.retrieve( - "file_id", - ) - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.files.with_raw_response.retrieve( - "file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.files.with_streaming_response.retrieve( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.files.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.list() - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.list( - after="after", - limit=0, - order="asc", - purpose="purpose", - ) - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.files.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(FileListResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.files.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(FileListResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.delete( - "file_id", - ) - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.files.with_raw_response.delete( - "file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - 
assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.files.with_streaming_response.delete( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.files.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.retrieve_content( - "file_id", - ) - assert_matches_type(str, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.files.with_raw_response.retrieve_content( - "file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(str, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.files.with_streaming_response.retrieve_content( - "file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(str, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.files.with_raw_response.retrieve_content( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.files.upload( - file=b"raw file contents", - purpose="assistants", - ) - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.files.with_raw_response.upload( - file=b"raw file contents", - purpose="assistants", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(OpenAIFile, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_upload(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.files.with_streaming_response.upload( - file=b"raw file contents", - purpose="assistants", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(OpenAIFile, file, 
path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py deleted file mode 100644 index 380a0759..00000000 --- a/tests/api_resources/test_images.py +++ /dev/null @@ -1,320 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - ImagesResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestImages: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_edit(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_edit_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - mask=b"raw file contents", - model="dall-e-2", - n=1, - response_format="url", - size="1024x1024", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None: - response = client.images.with_raw_response.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_edit(self, client: DigitaloceanGenaiSDK) -> None: - with client.images.with_streaming_response.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_generation(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_generation( - prompt="A cute baby sea otter", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_generation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_generation( - prompt="A cute baby sea otter", - model="dall-e-3", - n=1, - quality="standard", - response_format="url", - size="1024x1024", - style="vivid", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None: - response = client.images.with_raw_response.create_generation( - prompt="A cute baby sea otter", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_generation(self, client: DigitaloceanGenaiSDK) -> None: - with client.images.with_streaming_response.create_generation( - prompt="A cute baby sea otter", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_variation(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_variation( - image=b"raw file contents", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_variation_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - image = client.images.create_variation( - image=b"raw file contents", - model="dall-e-2", - n=1, - response_format="url", - size="1024x1024", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None: - response = client.images.with_raw_response.create_variation( - image=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_variation(self, client: DigitaloceanGenaiSDK) -> None: - with client.images.with_streaming_response.create_variation( - image=b"raw file contents", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncImages: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_edit_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - mask=b"raw file contents", - model="dall-e-2", - n=1, - response_format="url", - size="1024x1024", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.images.with_raw_response.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await 
response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_edit(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.images.with_streaming_response.create_edit( - image=b"raw file contents", - prompt="A cute baby sea otter wearing a beret", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_generation( - prompt="A cute baby sea otter", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_generation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_generation( - prompt="A cute baby sea otter", - model="dall-e-3", - n=1, - quality="standard", - response_format="url", - size="1024x1024", - style="vivid", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.images.with_raw_response.create_generation( - prompt="A cute baby sea otter", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_generation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.images.with_streaming_response.create_generation( - prompt="A cute baby sea otter", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_variation( - image=b"raw file contents", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_variation_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - image = await async_client.images.create_variation( - image=b"raw file contents", - model="dall-e-2", - n=1, - response_format="url", - size="1024x1024", - user="user-1234", - ) - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.images.with_raw_response.create_variation( - image=b"raw file contents", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImagesResponse, image, path=["response"]) - - @pytest.mark.skip() - 
-    @parametrize
-    async def test_streaming_response_create_variation(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.images.with_streaming_response.create_variation(
-            image=b"raw file contents",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            image = await response.parse()
-            assert_matches_type(ImagesResponse, image, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index aa215415..1148affb 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -9,7 +9,7 @@
 
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import Model, ModelListResponse, ModelDeleteResponse
+from digitalocean_genai_sdk.types import Model, ModelListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -21,7 +21,7 @@ class TestModels:
     @parametrize
     def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
         model = client.models.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
         )
         assert_matches_type(Model, model, path=["response"])
 
@@ -29,7 +29,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
         response = client.models.with_raw_response.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
         )
 
         assert response.is_closed is True
@@ -41,7 +41,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
         with client.models.with_streaming_response.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -87,48 +87,6 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        model = client.models.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        )
-        assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.models.with_raw_response.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = response.parse()
-        assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.models.with_streaming_response.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = response.parse()
-            assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
-            client.models.with_raw_response.delete(
-                "",
-            )
-
 
 class TestAsyncModels:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -137,7 +95,7 @@ class TestAsyncModels:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         model = await async_client.models.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
         )
         assert_matches_type(Model, model, path=["response"])
 
@@ -145,7 +103,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) ->
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         response = await async_client.models.with_raw_response.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
        )
 
         assert response.is_closed is True
@@ -157,7 +115,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         async with async_client.models.with_streaming_response.retrieve(
-            "gpt-4o-mini",
+            "llama3-8b-instruct",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -202,45 +160,3 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
         assert_matches_type(ModelListResponse, model, path=["response"])
 
         assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        model = await async_client.models.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        )
-        assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.models.with_raw_response.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = await response.parse()
-        assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.models.with_streaming_response.delete(
-            "ft:gpt-4o-mini:acemeco:suffix:abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = await response.parse()
-            assert_matches_type(ModelDeleteResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
-            await async_client.models.with_raw_response.delete(
-                "",
-            )
diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py
deleted file mode 100644
index 79d34625..00000000
--- a/tests/api_resources/test_moderations.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ModerationClassifyResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModerations:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_classify(self, client: DigitaloceanGenaiSDK) -> None:
-        moderation = client.moderations.classify(
-            input="I want to kill them.",
-        )
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_classify_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        moderation = client.moderations.classify(
-            input="I want to kill them.",
-            model="omni-moderation-2024-09-26",
-        )
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.moderations.with_raw_response.classify(
-            input="I want to kill them.",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        moderation = response.parse()
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_classify(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.moderations.with_streaming_response.classify(
-            input="I want to kill them.",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            moderation = response.parse()
-            assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModerations:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        moderation = await async_client.moderations.classify(
-            input="I want to kill them.",
-        )
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_classify_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        moderation = await async_client.moderations.classify(
-            input="I want to kill them.",
-            model="omni-moderation-2024-09-26",
-        )
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.moderations.with_raw_response.classify(
-            input="I want to kill them.",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        moderation = await response.parse()
-        assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_classify(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.moderations.with_streaming_response.classify(
-            input="I want to kill them.",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            moderation = await response.parse()
-            assert_matches_type(ModerationClassifyResponse, moderation, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_organization.py b/tests/api_resources/test_organization.py
deleted file mode 100644
index 844ed287..00000000
--- a/tests/api_resources/test_organization.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    UsageResponse,
-    OrganizationListAuditLogsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestOrganization:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
-        organization = client.organization.get_costs(
-            start_time=0,
-        )
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_get_costs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        organization = client.organization.get_costs(
-            start_time=0,
-            bucket_width="1d",
-            end_time=0,
-            group_by=["project_id"],
-            limit=0,
-            page="page",
-            project_ids=["string"],
-        )
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.with_raw_response.get_costs(
-            start_time=0,
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        organization = response.parse()
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_get_costs(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.with_streaming_response.get_costs(
-            start_time=0,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            organization = response.parse()
-            assert_matches_type(UsageResponse, organization, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
-        organization = client.organization.list_audit_logs()
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_audit_logs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        organization = client.organization.list_audit_logs(
-            actor_emails=["string"],
-            actor_ids=["string"],
-            after="after",
-            before="before",
-            effective_at={
-                "gt": 0,
-                "gte": 0,
-                "lt": 0,
-                "lte": 0,
-            },
-            event_types=["api_key.created"],
-            limit=0,
-            project_ids=["string"],
-            resource_ids=["string"],
-        )
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.organization.with_raw_response.list_audit_logs()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        organization = response.parse()
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list_audit_logs(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.organization.with_streaming_response.list_audit_logs() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            organization = response.parse()
-            assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncOrganization:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        organization = await async_client.organization.get_costs(
-            start_time=0,
-        )
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_get_costs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        organization = await async_client.organization.get_costs(
-            start_time=0,
-            bucket_width="1d",
-            end_time=0,
-            group_by=["project_id"],
-            limit=0,
-            page="page",
-            project_ids=["string"],
-        )
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.with_raw_response.get_costs(
-            start_time=0,
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        organization = await response.parse()
-        assert_matches_type(UsageResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_get_costs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.with_streaming_response.get_costs(
-            start_time=0,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            organization = await response.parse()
-            assert_matches_type(UsageResponse, organization, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        organization = await async_client.organization.list_audit_logs()
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_audit_logs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        organization = await async_client.organization.list_audit_logs(
-            actor_emails=["string"],
-            actor_ids=["string"],
-            after="after",
-            before="before",
-            effective_at={
-                "gt": 0,
-                "gte": 0,
-                "lt": 0,
-                "lte": 0,
-            },
-            event_types=["api_key.created"],
-            limit=0,
-            project_ids=["string"],
-            resource_ids=["string"],
-        )
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.organization.with_raw_response.list_audit_logs()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        organization = await response.parse()
-        assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list_audit_logs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.organization.with_streaming_response.list_audit_logs() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            organization = await response.parse()
-            assert_matches_type(OrganizationListAuditLogsResponse, organization, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_realtime.py b/tests/api_resources/test_realtime.py
deleted file mode 100644
index 15797ff9..00000000
--- a/tests/api_resources/test_realtime.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    RealtimeCreateSessionResponse,
-    RealtimeCreateTranscriptionSessionResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRealtime:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_session(self, client: DigitaloceanGenaiSDK) -> None:
-        realtime = client.realtime.create_session()
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        realtime = client.realtime.create_session(
-            input_audio_format="pcm16",
-            input_audio_noise_reduction={"type": "near_field"},
-            input_audio_transcription={
-                "language": "language",
-                "model": "model",
-                "prompt": "prompt",
-            },
-            instructions="instructions",
-            max_response_output_tokens=0,
-            modalities=["text"],
-            model="gpt-4o-realtime-preview",
-            output_audio_format="pcm16",
-            temperature=0,
-            tool_choice="tool_choice",
-            tools=[
-                {
-                    "description": "description",
-                    "name": "name",
-                    "parameters": {},
-                    "type": "function",
-                }
-            ],
-            turn_detection={
-                "create_response": True,
-                "eagerness": "low",
-                "interrupt_response": True,
-                "prefix_padding_ms": 0,
-                "silence_duration_ms": 0,
-                "threshold": 0,
-                "type": "server_vad",
-            },
-            voice="ash",
-        )
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.realtime.with_raw_response.create_session()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        realtime = response.parse()
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create_session(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.realtime.with_streaming_response.create_session() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            realtime = response.parse()
-            assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
-        realtime = client.realtime.create_transcription_session()
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_transcription_session_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        realtime = client.realtime.create_transcription_session(
-            include=["string"],
-            input_audio_format="pcm16",
-            input_audio_noise_reduction={"type": "near_field"},
-            input_audio_transcription={
-                "language": "language",
-                "model": "gpt-4o-transcribe",
-                "prompt": "prompt",
-            },
-            modalities=["text"],
-            turn_detection={
-                "create_response": True,
-                "eagerness": "low",
-                "interrupt_response": True,
-                "prefix_padding_ms": 0,
-                "silence_duration_ms": 0,
-                "threshold": 0,
-                "type": "server_vad",
-            },
-        )
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.realtime.with_raw_response.create_transcription_session()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        realtime = response.parse()
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create_transcription_session(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.realtime.with_streaming_response.create_transcription_session() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            realtime = response.parse()
-            assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncRealtime:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        realtime = await async_client.realtime.create_session()
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_session_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        realtime = await async_client.realtime.create_session(
-            input_audio_format="pcm16",
-            input_audio_noise_reduction={"type": "near_field"},
-            input_audio_transcription={
-                "language": "language",
-                "model": "model",
-                "prompt": "prompt",
-            },
-            instructions="instructions",
-            max_response_output_tokens=0,
-            modalities=["text"],
-            model="gpt-4o-realtime-preview",
-            output_audio_format="pcm16",
-            temperature=0,
-            tool_choice="tool_choice",
-            tools=[
-                {
-                    "description": "description",
-                    "name": "name",
-                    "parameters": {},
-                    "type": "function",
-                }
-            ],
-            turn_detection={
-                "create_response": True,
-                "eagerness": "low",
-                "interrupt_response": True,
-                "prefix_padding_ms": 0,
-                "silence_duration_ms": 0,
-                "threshold": 0,
-                "type": "server_vad",
-            },
-            voice="ash",
-        )
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.realtime.with_raw_response.create_session()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        realtime = await response.parse()
-        assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.realtime.with_streaming_response.create_session() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            realtime = await response.parse()
-            assert_matches_type(RealtimeCreateSessionResponse, realtime, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        realtime = await async_client.realtime.create_transcription_session()
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_transcription_session_with_all_params(
-        self, async_client: AsyncDigitaloceanGenaiSDK
-    ) -> None:
-        realtime = await async_client.realtime.create_transcription_session(
-            include=["string"],
-            input_audio_format="pcm16",
-            input_audio_noise_reduction={"type": "near_field"},
-            input_audio_transcription={
-                "language": "language",
-                "model": "gpt-4o-transcribe",
-                "prompt": "prompt",
-            },
-            modalities=["text"],
-            turn_detection={
-                "create_response": True,
-                "eagerness": "low",
-                "interrupt_response": True,
-                "prefix_padding_ms": 0,
-                "silence_duration_ms": 0,
-                "threshold": 0,
-                "type": "server_vad",
-            },
-        )
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create_transcription_session(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.realtime.with_raw_response.create_transcription_session()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        realtime = await response.parse()
-        assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create_transcription_session(
-        self, async_client: AsyncDigitaloceanGenaiSDK
-    ) -> None:
-        async with async_client.realtime.with_streaming_response.create_transcription_session() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            realtime = await response.parse()
-            assert_matches_type(RealtimeCreateTranscriptionSessionResponse, realtime, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
deleted file mode 100644
index 4bd7e367..00000000
--- a/tests/api_resources/test_responses.py
+++ /dev/null
@@ -1,479 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    Response,
-    ResponseListInputItemsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestResponses:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.create(
-            input="string",
-            model="gpt-4o",
-            include=["file_search_call.results"],
-            instructions="instructions",
-            max_output_tokens=0,
-            metadata={"foo": "string"},
-            parallel_tool_calls=True,
-            previous_response_id="previous_response_id",
-            reasoning={
-                "effort": "low",
-                "generate_summary": "concise",
-            },
-            store=True,
-            stream=True,
-            temperature=1,
-            text={"format": {"type": "text"}},
-            tool_choice="none",
-            tools=[
-                {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
-                }
-            ],
-            top_p=1,
-            truncation="auto",
-            user="user-1234",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        http_response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = http_response.parse()
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = http_response.parse()
-            assert_matches_type(Response, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-            include=["file_search_call.results"],
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        http_response = client.responses.with_raw_response.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = http_response.parse()
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.responses.with_streaming_response.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = http_response.parse()
-            assert_matches_type(Response, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            client.responses.with_raw_response.retrieve(
-                response_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        )
-        assert response is None
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        http_response = client.responses.with_raw_response.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = http_response.parse()
-        assert response is None
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.responses.with_streaming_response.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = http_response.parse()
-            assert response is None
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            client.responses.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.list_input_items(
-            response_id="response_id",
-        )
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_input_items_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.responses.list_input_items(
-            response_id="response_id",
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
-        http_response = client.responses.with_raw_response.list_input_items(
-            response_id="response_id",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = http_response.parse()
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.responses.with_streaming_response.list_input_items(
-            response_id="response_id",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = http_response.parse()
-            assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list_input_items(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            client.responses.with_raw_response.list_input_items(
-                response_id="",
-            )
-
-
-class TestAsyncResponses:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
-            include=["file_search_call.results"],
-            instructions="instructions",
-            max_output_tokens=0,
-            metadata={"foo": "string"},
-            parallel_tool_calls=True,
-            previous_response_id="previous_response_id",
-            reasoning={
-                "effort": "low",
-                "generate_summary": "concise",
-            },
-            store=True,
-            stream=True,
-            temperature=1,
-            text={"format": {"type": "text"}},
-            tool_choice="none",
-            tools=[
-                {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
-                }
-            ],
-            top_p=1,
-            truncation="auto",
-            user="user-1234",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        http_response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = await http_response.parse()
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = await http_response.parse()
-            assert_matches_type(Response, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-            include=["file_search_call.results"],
-        )
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        http_response = await async_client.responses.with_raw_response.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = await http_response.parse()
-        assert_matches_type(Response, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.responses.with_streaming_response.retrieve(
-            response_id="resp_677efb5139a88190b512bc3fef8e535d",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = await http_response.parse()
-            assert_matches_type(Response, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            await async_client.responses.with_raw_response.retrieve(
-                response_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        )
-        assert response is None
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        http_response = await async_client.responses.with_raw_response.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = await http_response.parse()
-        assert response is None
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.responses.with_streaming_response.delete(
-            "resp_677efb5139a88190b512bc3fef8e535d",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = await http_response.parse()
-            assert response is None
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            await async_client.responses.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.list_input_items(
-            response_id="response_id",
-        )
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_input_items_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.responses.list_input_items(
-            response_id="response_id",
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        http_response = await async_client.responses.with_raw_response.list_input_items(
-            response_id="response_id",
-        )
-
-        assert http_response.is_closed is True
-        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-        response = await http_response.parse()
-        assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.responses.with_streaming_response.list_input_items(
-            response_id="response_id",
-        ) as http_response:
-            assert not http_response.is_closed
-            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            response = await http_response.parse()
-            assert_matches_type(ResponseListInputItemsResponse, response, path=["response"])
-
-        assert cast(Any, http_response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_list_input_items(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
-            await async_client.responses.with_raw_response.list_input_items(
-                response_id="",
-            )
diff --git a/tests/api_resources/test_threads.py b/tests/api_resources/test_threads.py
deleted file mode 100644
index cca5e067..00000000
--- a/tests/api_resources/test_threads.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ThreadObject, ThreadDeleteResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestThreads:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.create()
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.create(
-            messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {
-                    "vector_store_ids": ["string"],
-                    "vector_stores": [
-                        {
-                            "chunking_strategy": {"type": "auto"},
-                            "file_ids": ["string"],
-                            "metadata": {"foo": "string"},
-                        }
-                    ],
-                },
-            },
-        )
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        thread = response.parse()
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            thread = response.parse()
-            assert_matches_type(ThreadObject, thread, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.retrieve(
-            "thread_id",
-        )
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.with_raw_response.retrieve(
-            "thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        thread = response.parse()
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.with_streaming_response.retrieve(
-            "thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            thread = response.parse()
-            assert_matches_type(ThreadObject, thread, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.update(
-            thread_id="thread_id",
-        )
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.update(
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-        )
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.with_raw_response.update(
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        thread = response.parse()
-        assert_matches_type(ThreadObject, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.with_streaming_response.update(
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            thread = response.parse()
-            assert_matches_type(ThreadObject, thread, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.with_raw_response.update(
-                thread_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        thread = client.threads.delete(
-            "thread_id",
-        )
-        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.with_raw_response.delete(
-            "thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        thread = response.parse()
-        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.with_streaming_response.delete(
-            "thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            thread = response.parse()
-            assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.with_raw_response.delete(
-                "",
-            )
-
-
-class TestAsyncThreads:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        thread = await async_client.threads.create()
assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - thread = await async_client.threads.create( - messages=[ - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}], - } - ], - "metadata": {"foo": "string"}, - } - ], - metadata={"foo": "string"}, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, - ) - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - thread = await async_client.threads.retrieve( - "thread_id", - ) - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.with_raw_response.retrieve( - "thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.with_streaming_response.retrieve( - "thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - thread = await async_client.threads.update( - thread_id="thread_id", - ) - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - thread = await async_client.threads.update( - thread_id="thread_id", - metadata={"foo": "string"}, - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": {"vector_store_ids": ["string"]}, - }, - ) - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.with_raw_response.update( - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.with_streaming_response.update( - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - thread = await response.parse() - assert_matches_type(ThreadObject, thread, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.with_raw_response.update( - thread_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - thread = await async_client.threads.delete( - "thread_id", - ) - assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.with_raw_response.delete( - "thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - thread = await response.parse() - assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.with_streaming_response.delete( - "thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - thread = await response.parse() - assert_matches_type(ThreadDeleteResponse, thread, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py deleted file mode 100644 index 35f52730..00000000 --- a/tests/api_resources/test_uploads.py +++ /dev/null @@ -1,399 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    Upload,
-    UploadAddPartResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestUploads:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        upload = client.uploads.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.uploads.with_raw_response.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.uploads.with_streaming_response.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_add_part(self, client: DigitaloceanGenaiSDK) -> None:
-        upload = client.uploads.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        )
-        assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.uploads.with_raw_response.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = response.parse()
-        assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_add_part(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.uploads.with_streaming_response.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = response.parse()
-            assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_add_part(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            client.uploads.with_raw_response.add_part(
-                upload_id="",
-                data=b"raw file contents",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        upload = client.uploads.cancel(
-            "upload_abc123",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.uploads.with_raw_response.cancel(
-            "upload_abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.uploads.with_streaming_response.cancel(
-            "upload_abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            client.uploads.with_raw_response.cancel(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_complete(self, client: DigitaloceanGenaiSDK) -> None:
-        upload = client.uploads.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_complete_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        upload = client.uploads.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-            md5="md5",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.uploads.with_raw_response.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_complete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.uploads.with_streaming_response.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_complete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            client.uploads.with_raw_response.complete(
-                upload_id="",
-                part_ids=["string"],
-            )
-
-
-class TestAsyncUploads:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        upload = await async_client.uploads.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.uploads.with_raw_response.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = await response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.uploads.with_streaming_response.create(
-            bytes=0,
-            filename="filename",
-            mime_type="mime_type",
-            purpose="assistants",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = await response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        upload = await async_client.uploads.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        )
-        assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.uploads.with_raw_response.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = await response.parse()
-        assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.uploads.with_streaming_response.add_part(
-            upload_id="upload_abc123",
-            data=b"raw file contents",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = await response.parse()
-            assert_matches_type(UploadAddPartResponse, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_add_part(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            await async_client.uploads.with_raw_response.add_part(
-                upload_id="",
-                data=b"raw file contents",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        upload = await async_client.uploads.cancel(
-            "upload_abc123",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.uploads.with_raw_response.cancel(
-            "upload_abc123",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = await response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.uploads.with_streaming_response.cancel(
-            "upload_abc123",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = await response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            await async_client.uploads.with_raw_response.cancel(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        upload = await async_client.uploads.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_complete_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        upload = await async_client.uploads.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-            md5="md5",
-        )
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.uploads.with_raw_response.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        upload = await response.parse()
-        assert_matches_type(Upload, upload, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.uploads.with_streaming_response.complete(
-            upload_id="upload_abc123",
-            part_ids=["string"],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            upload = await response.parse()
-            assert_matches_type(Upload, upload, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_complete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
-            await async_client.uploads.with_raw_response.complete(
-                upload_id="",
-                part_ids=["string"],
-            )
diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py
deleted file mode 100644
index 1c8b5fb0..00000000
--- a/tests/api_resources/test_vector_stores.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    VectorStoreObject,
-    VectorStoreListResponse,
-    VectorStoreDeleteResponse,
-    VectorStoreSearchResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestVectorStores:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.create()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.create(
-            chunking_strategy={"type": "auto"},
-            expires_after={
-                "anchor": "last_active_at",
-                "days": 1,
-            },
-            file_ids=["string"],
-            metadata={"foo": "string"},
-            name="name",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.retrieve(
-            "vector_store_id",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.retrieve(
-            "vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.retrieve(
-            "vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.vector_stores.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.update(
-            vector_store_id="vector_store_id",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.update(
-            vector_store_id="vector_store_id",
-            expires_after={
-                "anchor": "last_active_at",
-                "days": 1,
-            },
-            metadata={"foo": "string"},
-            name="name",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.update(
-            vector_store_id="vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.update(
-            vector_store_id="vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.vector_stores.with_raw_response.update(
-                vector_store_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.list()
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.list(
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.delete(
-            "vector_store_id",
-        )
-        assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.delete(
-            "vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.delete(
-            "vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.vector_stores.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_search(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        )
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_search_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        vector_store = client.vector_stores.search(
-            vector_store_id="vs_abc123",
-            query="string",
-            filters={
-                "key": "key",
-                "type": "eq",
-                "value": "string",
-            },
-            max_num_results=1,
-            ranking_options={
-                "ranker": "auto",
-                "score_threshold": 0,
-            },
-            rewrite_query=True,
-        )
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_search(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.vector_stores.with_raw_response.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = response.parse()
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_search(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.vector_stores.with_streaming_response.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = response.parse()
-            assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_search(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            client.vector_stores.with_raw_response.search(
                vector_store_id="",
-                query="string",
-            )
-
-
-class TestAsyncVectorStores:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.create()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.create(
-            chunking_strategy={"type": "auto"},
-            expires_after={
-                "anchor": "last_active_at",
-                "days": 1,
-            },
-            file_ids=["string"],
-            metadata={"foo": "string"},
-            name="name",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.retrieve(
-            "vector_store_id",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.retrieve(
-            "vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.retrieve(
-            "vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.vector_stores.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.update(
-            vector_store_id="vector_store_id",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.update(
-            vector_store_id="vector_store_id",
-            expires_after={
-                "anchor": "last_active_at",
-                "days": 1,
-            },
-            metadata={"foo": "string"},
-            name="name",
-        )
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.update(
-            vector_store_id="vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.update(
-            vector_store_id="vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreObject, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.vector_stores.with_raw_response.update(
-                vector_store_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.list()
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.list(
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreListResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.delete(
-            "vector_store_id",
-        )
-        assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.delete(
-            "vector_store_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.delete(
-            "vector_store_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.vector_stores.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        )
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_search_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        vector_store = await async_client.vector_stores.search(
-            vector_store_id="vs_abc123",
-            query="string",
-            filters={
-                "key": "key",
-                "type": "eq",
-                "value": "string",
-            },
-            max_num_results=1,
-            ranking_options={
-                "ranker": "auto",
-                "score_threshold": 0,
-            },
-            rewrite_query=True,
-        )
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.vector_stores.with_raw_response.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        vector_store = await response.parse()
-        assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.vector_stores.with_streaming_response.search(
-            vector_store_id="vs_abc123",
-            query="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            vector_store = await response.parse()
-            assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_search(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
-            await async_client.vector_stores.with_raw_response.search(
-                vector_store_id="",
-                query="string",
-            )
diff --git a/tests/api_resources/threads/__init__.py b/tests/api_resources/threads/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/threads/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/threads/runs/__init__.py b/tests/api_resources/threads/runs/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/threads/runs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/threads/runs/test_steps.py b/tests/api_resources/threads/runs/test_steps.py
deleted file mode 100644
index e972e952..00000000
--- a/tests/api_resources/threads/runs/test_steps.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads.runs import (
-    RunStepObject,
-    StepListResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestSteps:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        step = client.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        )
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        step = client.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-        )
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.runs.steps.with_raw_response.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        step = response.parse()
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.runs.steps.with_streaming_response.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = response.parse()
-            assert_matches_type(RunStepObject, step, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="step_id",
-                thread_id="",
-                run_id="run_id",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="step_id",
-                thread_id="thread_id",
-                run_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
-            client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="",
-                thread_id="thread_id",
-                run_id="run_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        step = client.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        step = client.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.runs.steps.with_raw_response.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        step = response.parse()
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.runs.steps.with_streaming_response.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = response.parse()
-            assert_matches_type(StepListResponse, step, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.runs.steps.with_raw_response.list(
-                run_id="run_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.threads.runs.steps.with_raw_response.list(
-                run_id="",
-                thread_id="thread_id",
-            )
-
-
-class TestAsyncSteps:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        step = await async_client.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        )
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        step = await async_client.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-        )
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.runs.steps.with_raw_response.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        step = await response.parse()
-        assert_matches_type(RunStepObject, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.runs.steps.with_streaming_response.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = await response.parse()
-            assert_matches_type(RunStepObject, step, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="step_id",
-                thread_id="",
-                run_id="run_id",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="step_id",
-                thread_id="thread_id",
-                run_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
-            await async_client.threads.runs.steps.with_raw_response.retrieve(
-                step_id="",
-                thread_id="thread_id",
-                run_id="run_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        step = await async_client.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        step = await async_client.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            limit=0,
-            order="asc",
-        )
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.runs.steps.with_raw_response.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        step = await response.parse()
-        assert_matches_type(StepListResponse, step, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.runs.steps.with_streaming_response.list(
-            run_id="run_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = await response.parse()
-            assert_matches_type(StepListResponse, step, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.runs.steps.with_raw_response.list(
-                run_id="run_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.threads.runs.steps.with_raw_response.list(
-                run_id="",
-                thread_id="thread_id",
-            )
diff --git a/tests/api_resources/threads/test_messages.py b/tests/api_resources/threads/test_messages.py
deleted file mode 100644
index e1aaf51e..00000000
--- a/tests/api_resources/threads/test_messages.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.threads import (
-    MessageObject,
-    MessageListResponse,
-    MessageDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestMessages:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-            attachments=[
-                {
-                    "file_id": "file_id",
-                    "tools": [{"type": "code_interpreter"}],
-                }
-            ],
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.messages.with_raw_response.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.messages.with_streaming_response.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.messages.with_raw_response.create(
-                thread_id="",
-                content="string",
-                role="user",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.messages.with_raw_response.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.messages.with_streaming_response.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.messages.with_raw_response.retrieve(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.threads.messages.with_raw_response.retrieve(
-                message_id="",
-                thread_id="thread_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.messages.with_raw_response.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.messages.with_streaming_response.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.messages.with_raw_response.update(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.threads.messages.with_raw_response.update(
-                message_id="",
-                thread_id="thread_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.list(
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.list(
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-            run_id="run_id",
-        )
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.messages.with_raw_response.list(
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = response.parse()
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.messages.with_streaming_response.list(
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = response.parse()
-            assert_matches_type(MessageListResponse, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.messages.with_raw_response.list(
-                thread_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        message = client.threads.messages.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.threads.messages.with_raw_response.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = response.parse()
-        assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.threads.messages.with_streaming_response.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = response.parse()
-            assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.threads.messages.with_raw_response.delete(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.threads.messages.with_raw_response.delete(
-                message_id="",
-                thread_id="thread_id",
-            )
-
-
-class TestAsyncMessages:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-            attachments=[
-                {
-                    "file_id": "file_id",
-                    "tools": [{"type": "code_interpreter"}],
-                }
-            ],
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.messages.with_raw_response.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = await response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.messages.with_streaming_response.create(
-            thread_id="thread_id",
-            content="string",
-            role="user",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = await response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.create(
-                thread_id="",
-                content="string",
-                role="user",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.messages.with_raw_response.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = await response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.messages.with_streaming_response.retrieve(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = await response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.retrieve(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.retrieve(
-                message_id="",
-                thread_id="thread_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.messages.with_raw_response.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = await response.parse()
-        assert_matches_type(MessageObject, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.messages.with_streaming_response.update(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = await response.parse()
-            assert_matches_type(MessageObject, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.update(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.update(
-                message_id="",
-                thread_id="thread_id",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.list(
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.list(
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            limit=0,
-            order="asc",
-            run_id="run_id",
-        )
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.messages.with_raw_response.list(
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = await response.parse()
-        assert_matches_type(MessageListResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.messages.with_streaming_response.list(
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = await response.parse()
-            assert_matches_type(MessageListResponse, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.list(
-                thread_id="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        message = await async_client.threads.messages.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-        assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.threads.messages.with_raw_response.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        message = await response.parse()
-        assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.threads.messages.with_streaming_response.delete(
-            message_id="message_id",
-            thread_id="thread_id",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            message = await response.parse()
-            assert_matches_type(MessageDeleteResponse, message, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.delete(
-                message_id="message_id",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.threads.messages.with_raw_response.delete(
-                message_id="",
-                thread_id="thread_id",
-            )
diff --git a/tests/api_resources/threads/test_runs.py b/tests/api_resources/threads/test_runs.py
deleted file mode 100644
index 59716b5e..00000000
--- a/tests/api_resources/threads/test_runs.py
+++ /dev/null
@@ -1,967 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.threads import ( - RunObject, - RunListResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRuns: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.create( - assistant_id="assistant_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.create( - assistant_id="assistant_id", - instructions="instructions", - max_completion_tokens=256, - max_prompt_tokens=256, - metadata={"foo": "string"}, - model="gpt-4o", - parallel_tool_calls=True, - response_format="auto", - stream=True, - temperature=1, - thread={ - "messages": [ - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}], - } - ], - "metadata": {"foo": "string"}, - } - ], - "metadata": {"foo": "string"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, - }, - tool_choice="none", - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": {"vector_store_ids": ["string"]}, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - truncation_strategy={ - "type": "auto", - "last_messages": 1, - }, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.create( - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.create( - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.retrieve( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.retrieve( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.retrieve( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.retrieve( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - client.threads.runs.with_raw_response.retrieve( - run_id="", - thread_id="thread_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.update( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.update( - run_id="run_id", - thread_id="thread_id", - metadata={"foo": "string"}, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.update( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.update( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.update( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - client.threads.runs.with_raw_response.update( - run_id="", - thread_id="thread_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.list( - thread_id="thread_id", - ) - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.list( - thread_id="thread_id", - after="after", - before="before", - limit=0, - order="asc", - ) - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.list( - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.list( - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunListResponse, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.list( - thread_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.cancel( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.cancel( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.cancel( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.cancel( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - client.threads.runs.with_raw_response.cancel( - run_id="", - thread_id="thread_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_create_run(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_run_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - include=["step_details.tool_calls[*].file_search.results[*].content"], - additional_instructions="additional_instructions", - additional_messages=[ - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}], - } - ], - "metadata": {"foo": 
"string"}, - } - ], - instructions="instructions", - max_completion_tokens=256, - max_prompt_tokens=256, - metadata={"foo": "string"}, - model="gpt-4o", - parallel_tool_calls=True, - reasoning_effort="low", - response_format="auto", - stream=True, - temperature=1, - tool_choice="none", - tools=[{"type": "code_interpreter"}], - top_p=1, - truncation_strategy={ - "type": "auto", - "last_messages": 1, - }, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_run(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_run(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create_run(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.create_run( - thread_id="", - assistant_id="assistant_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_submit_tool_outputs_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - run = client.threads.runs.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[ - { - "output": "output", - "tool_call_id": "tool_call_id", - } - ], - stream=True, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: - response = client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: - with client.threads.runs.with_streaming_response.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_submit_tool_outputs(self, client: DigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="run_id", - thread_id="", - tool_outputs=[{}], - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="", - thread_id="thread_id", - tool_outputs=[{}], - ) - - -class TestAsyncRuns: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.create( - assistant_id="assistant_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.create( - assistant_id="assistant_id", - instructions="instructions", - max_completion_tokens=256, - max_prompt_tokens=256, - metadata={"foo": "string"}, - model="gpt-4o", - parallel_tool_calls=True, - response_format="auto", - stream=True, - temperature=1, - thread={ - "messages": [ - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}], - } - ], - "metadata": {"foo": "string"}, - } - ], - "metadata": {"foo": "string"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["string"]}, - "file_search": { - "vector_store_ids": ["string"], - "vector_stores": [ - { - "chunking_strategy": {"type": "auto"}, - "file_ids": ["string"], - "metadata": {"foo": "string"}, - } - ], - }, - }, - }, - tool_choice="none", - tool_resources={ - "code_interpreter": {"file_ids": ["string"]}, - "file_search": {"vector_store_ids": ["string"]}, - }, - tools=[{"type": "code_interpreter"}], - top_p=1, - truncation_strategy={ - "type": "auto", - "last_messages": 1, - }, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.create( - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.create( - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.retrieve( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await 
async_client.threads.runs.with_raw_response.retrieve( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.retrieve( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.retrieve( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - await async_client.threads.runs.with_raw_response.retrieve( - run_id="", - thread_id="thread_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.update( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.update( - run_id="run_id", - thread_id="thread_id", - metadata={"foo": "string"}, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.update( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.update( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.update( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - await async_client.threads.runs.with_raw_response.update( - run_id="", - thread_id="thread_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.list( - thread_id="thread_id", - ) - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.list( - thread_id="thread_id", - after="after", - before="before", - limit=0, - order="asc", - ) - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.list( - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunListResponse, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.list( - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunListResponse, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.list( - thread_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.cancel( - run_id="run_id", - thread_id="thread_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.cancel( - run_id="run_id", - thread_id="thread_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.cancel( - run_id="run_id", - thread_id="thread_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.cancel( - run_id="run_id", - thread_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - await async_client.threads.runs.with_raw_response.cancel( - run_id="", - thread_id="thread_id", - 
) - - @pytest.mark.skip() - @parametrize - async def test_method_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_run_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - include=["step_details.tool_calls[*].file_search.results[*].content"], - additional_instructions="additional_instructions", - additional_messages=[ - { - "content": "string", - "role": "user", - "attachments": [ - { - "file_id": "file_id", - "tools": [{"type": "code_interpreter"}], - } - ], - "metadata": {"foo": "string"}, - } - ], - instructions="instructions", - max_completion_tokens=256, - max_prompt_tokens=256, - metadata={"foo": "string"}, - model="gpt-4o", - parallel_tool_calls=True, - reasoning_effort="low", - response_format="auto", - stream=True, - temperature=1, - tool_choice="none", - tools=[{"type": "code_interpreter"}], - top_p=1, - truncation_strategy={ - "type": "auto", - "last_messages": 1, - }, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.create_run( - thread_id="thread_id", - assistant_id="assistant_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create_run(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.create_run( - thread_id="", - assistant_id="assistant_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_submit_tool_outputs_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - run = await async_client.threads.runs.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[ - { - "output": "output", - "tool_call_id": "tool_call_id", - } - ], - stream=True, - ) - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_submit_tool_outputs(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.threads.runs.with_streaming_response.submit_tool_outputs( - run_id="run_id", - thread_id="thread_id", - tool_outputs=[{}], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - run = await response.parse() - assert_matches_type(RunObject, run, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_submit_tool_outputs(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): - await async_client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="run_id", - thread_id="", - tool_outputs=[{}], - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): - await async_client.threads.runs.with_raw_response.submit_tool_outputs( - run_id="", - thread_id="thread_id", - tool_outputs=[{}], - ) diff --git a/tests/api_resources/vector_stores/__init__.py b/tests/api_resources/vector_stores/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/vector_stores/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py deleted file mode 100644 index 47897412..00000000 --- a/tests/api_resources/vector_stores/test_file_batches.py +++ /dev/null @@ -1,479 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.vector_stores import ( - VectorStoreFileBatchObject, - ListVectorStoreFilesResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFileBatches: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.create( - vector_store_id="vs_abc123", - file_ids=["string"], - attributes={"foo": "string"}, - chunking_strategy={"type": "auto"}, - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.file_batches.with_raw_response.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.file_batches.with_streaming_response.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.create( - vector_store_id="", - file_ids=["string"], - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with 
client.vector_stores.file_batches.with_streaming_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="", - vector_store_id="vs_abc123", - ) - - @pytest.mark.skip() - @parametrize - def test_method_cancel(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.file_batches.with_streaming_response.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="batch_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="", - vector_store_id="vector_store_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_files(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_files_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - file_batch = client.vector_stores.file_batches.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - after="after", - before="before", - filter="in_progress", - limit=0, - order="asc", - ) - 
assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_files(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_files(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.file_batches.with_streaming_response.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list_files(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="batch_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="", - vector_store_id="vector_store_id", - ) - - -class TestAsyncFileBatches: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.create( - vector_store_id="vs_abc123", - file_ids=["string"], - attributes={"foo": "string"}, - chunking_strategy={"type": "auto"}, - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.file_batches.with_raw_response.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = await response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.file_batches.with_streaming_response.create( - vector_store_id="vs_abc123", - file_ids=["string"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = await response.parse() - 
assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.create( - vector_store_id="", - file_ids=["string"], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = await response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.file_batches.with_streaming_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = await response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="vsfb_abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.retrieve( - batch_id="", - vector_store_id="vs_abc123", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = await response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with 
async_client.vector_stores.file_batches.with_streaming_response.cancel( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = await response.parse() - assert_matches_type(VectorStoreFileBatchObject, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="batch_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.cancel( - batch_id="", - vector_store_id="vector_store_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_files_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file_batch = await async_client.vector_stores.file_batches.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - after="after", - before="before", - filter="in_progress", - limit=0, - order="asc", - ) - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file_batch = await response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.file_batches.with_streaming_response.list_files( - batch_id="batch_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file_batch = await response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file_batch, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_files(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="batch_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"): - await async_client.vector_stores.file_batches.with_raw_response.list_files( - batch_id="", - vector_store_id="vector_store_id", - ) diff --git 
a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py deleted file mode 100644 index b93fe1b4..00000000 --- a/tests/api_resources/vector_stores/test_files.py +++ /dev/null @@ -1,677 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.vector_stores import ( - FileDeleteResponse, - VectorStoreFileObject, - FileRetrieveContentResponse, - ListVectorStoreFilesResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFiles: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.create( - vector_store_id="vs_abc123", - file_id="file_id", - attributes={"foo": "string"}, - chunking_strategy={"type": "auto"}, - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.create( - vector_store_id="", - file_id="file_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - 
assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.retrieve( - file_id="file-abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.vector_stores.files.with_raw_response.retrieve( - file_id="", - vector_store_id="vs_abc123", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.update( - file_id="file-abc123", - vector_store_id="", - attributes={"foo": "string"}, - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.vector_stores.files.with_raw_response.update( - file_id="", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.list( - vector_store_id="vector_store_id", - ) - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.list( - vector_store_id="vector_store_id", - 
after="after", - before="before", - filter="in_progress", - limit=0, - order="asc", - ) - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.list( - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.list( - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.list( - vector_store_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.delete( - file_id="file_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.vector_stores.files.with_raw_response.delete( - file_id="", - vector_store_id="vector_store_id", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - file = client.vector_stores.files.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_raw_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - response = client.vector_stores.files.with_raw_response.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = response.parse() - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - with client.vector_stores.files.with_streaming_response.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = response.parse() - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_content(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - client.vector_stores.files.with_raw_response.retrieve_content( - file_id="file-abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - client.vector_stores.files.with_raw_response.retrieve_content( - file_id="", - vector_store_id="vs_abc123", - ) - - -class TestAsyncFiles: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.create( - vector_store_id="vs_abc123", - file_id="file_id", - attributes={"foo": "string"}, - chunking_strategy={"type": "auto"}, - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.create( - vector_store_id="vs_abc123", - file_id="file_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.create( - vector_store_id="", - file_id="file_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.retrieve( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.retrieve( - file_id="file-abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.retrieve( - file_id="", - vector_store_id="vs_abc123", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.update( - file_id="file-abc123", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(VectorStoreFileObject, file, path=["response"]) - - 
assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.update( - file_id="file-abc123", - vector_store_id="", - attributes={"foo": "string"}, - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.update( - file_id="", - vector_store_id="vs_abc123", - attributes={"foo": "string"}, - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.list( - vector_store_id="vector_store_id", - ) - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.list( - vector_store_id="vector_store_id", - after="after", - before="before", - filter="in_progress", - limit=0, - order="asc", - ) - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.list( - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.list( - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(ListVectorStoreFilesResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.list( - vector_store_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async 
def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.delete( - file_id="file_id", - vector_store_id="vector_store_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(FileDeleteResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.delete( - file_id="file_id", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.delete( - file_id="", - vector_store_id="vector_store_id", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - file = await async_client.vector_stores.files.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.vector_stores.files.with_raw_response.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - file = await response.parse() - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.vector_stores.files.with_streaming_response.retrieve_content( - file_id="file-abc123", - vector_store_id="vs_abc123", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - file = await response.parse() - assert_matches_type(FileRetrieveContentResponse, file, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_content(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.retrieve_content( - file_id="file-abc123", - vector_store_id="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): - await async_client.vector_stores.files.with_raw_response.retrieve_content( - file_id="", - vector_store_id="vs_abc123", - ) diff --git a/tests/test_client.py b/tests/test_client.py index f24c8c5f..7ac3aae1 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,20 +23,17 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError from digitalocean_genai_sdk._types import Omit +from digitalocean_genai_sdk._utils import maybe_transform from digitalocean_genai_sdk._models import BaseModel, 
FinalRequestOptions from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER -from digitalocean_genai_sdk._exceptions import ( - APIStatusError, - APITimeoutError, - DigitaloceanGenaiSDKError, - APIResponseValidationError, -) +from digitalocean_genai_sdk._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from digitalocean_genai_sdk._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options, ) +from digitalocean_genai_sdk.types.chat.completion_create_params import CompletionCreateParams from .utils import update_env @@ -339,16 +336,6 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" - def test_validate_headers(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" - - with pytest.raises(DigitaloceanGenaiSDKError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) - _ = client2 - def test_default_query_option(self) -> None: client = DigitaloceanGenaiSDK( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} @@ -727,20 +714,58 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}) + self.client.post( + "/chat/completions", + body=cast( + object, + maybe_transform( + dict( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ), + CompletionCreateParams, + ), + ), + cast_to=httpx.Response, + options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, + ) assert _get_open_connections(self.client) == 0 @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.get("/assistants").mock(return_value=httpx.Response(500)) + respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.get("/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}) + self.client.post( + "/chat/completions", + body=cast( + object, + maybe_transform( + dict( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ), + CompletionCreateParams, + ), + ), + cast_to=httpx.Response, + options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, + ) assert _get_open_connections(self.client) == 0 @@ -768,9 +793,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return 
httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.assistants.with_raw_response.list() + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -792,9 +825,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) - - response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()}) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": Omit()}, + ) assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -815,9 +857,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) - - response = client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"}) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": "42"}, + ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" @@ -1128,16 +1179,6 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" - def test_validate_headers(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" - - with pytest.raises(DigitaloceanGenaiSDKError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) - _ = client2 - def test_default_query_option(self) -> None: client = AsyncDigitaloceanGenaiSDK( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} @@ -1520,11 +1561,28 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.get("/assistants").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.get( - "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}} + await self.client.post( + "/chat/completions", + 
body=cast( + object, + maybe_transform( + dict( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ), + CompletionCreateParams, + ), + ), + cast_to=httpx.Response, + options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 @@ -1532,11 +1590,28 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.get("/assistants").mock(return_value=httpx.Response(500)) + respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.get( - "/assistants", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}} + await self.client.post( + "/chat/completions", + body=cast( + object, + maybe_transform( + dict( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ), + CompletionCreateParams, + ), + ), + cast_to=httpx.Response, + options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 @@ -1566,9 +1641,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.assistants.with_raw_response.list() + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1591,9 +1674,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) - - response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()}) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": Omit()}, + ) assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -1615,9 +1707,18 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/assistants").mock(side_effect=retry_handler) - - response = await client.assistants.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"}) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": "42"}, + ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" From 5d78af61b9230d5525cdb4caa6e5a0905f5a8cf3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Jun 
2025 13:18:36 +0000 Subject: [PATCH 008/200] feat(api): update via SDK Studio --- .github/workflows/publish-pypi.yml | 31 - .github/workflows/release-doctor.yml | 21 - .release-please-manifest.json | 3 - .stats.yml | 4 +- CONTRIBUTING.md | 4 +- LICENSE | 202 +++- README.md | 107 +- api.md | 330 +++++- bin/check-release-environment | 21 - pyproject.toml | 12 +- release-please-config.json | 66 -- requirements-dev.lock | 12 +- requirements.lock | 12 +- src/digitalocean_genai_sdk/_client.py | 16 +- src/digitalocean_genai_sdk/_version.py | 2 +- .../resources/__init__.py | 14 + .../{chat/completions.py => chat.py} | 118 ++- .../resources/chat/__init__.py | 33 - .../resources/chat/chat.py | 102 -- .../resources/embeddings.py | 8 +- .../resources/genai/__init__.py | 103 ++ .../resources/genai/agents/__init__.py | 89 ++ .../resources/genai/agents/agents.py | 965 ++++++++++++++++++ .../resources/genai/agents/api_keys.py | 581 +++++++++++ .../resources/genai/agents/child_agents.py | 508 +++++++++ .../resources/genai/agents/functions.py | 421 ++++++++ .../resources/genai/agents/knowledge_bases.py | 346 +++++++ .../resources/genai/agents/versions.py | 298 ++++++ .../resources/genai/auth/__init__.py | 33 + .../resources/genai/auth/agents/__init__.py | 33 + .../resources/genai/auth/agents/agents.py | 102 ++ .../resources/genai/auth/agents/token.py | 173 ++++ .../resources/genai/auth/auth.py | 102 ++ .../resources/genai/genai.py | 383 +++++++ .../resources/genai/indexing_jobs.py | 543 ++++++++++ .../genai/knowledge_bases/__init__.py | 33 + .../genai/knowledge_bases/data_sources.py | 410 ++++++++ .../genai/knowledge_bases/knowledge_bases.py | 667 ++++++++++++ .../resources/genai/models/__init__.py | 33 + .../resources/genai/models/api_keys.py | 529 ++++++++++ .../resources/genai/models/models.py | 282 +++++ .../resources/genai/providers/__init__.py | 47 + .../genai/providers/anthropic/__init__.py | 33 + .../genai/providers/anthropic/anthropic.py | 102 ++ .../genai/providers/anthropic/keys.py | 667 ++++++++++++ .../genai/providers/openai/__init__.py | 33 + .../resources/genai/providers/openai/keys.py | 663 ++++++++++++ .../genai/providers/openai/openai.py | 102 ++ .../resources/genai/providers/providers.py | 134 +++ .../resources/models.py | 8 +- src/digitalocean_genai_sdk/types/__init__.py | 10 +- .../types/chat/__init__.py | 12 - .../types/chat/response_message.py | 19 - .../types/chat/usage.py | 16 - ...equest_message_content_part_text_param.py} | 4 +- .../chat_completion_stream_options_param.py | 20 - ...ob.py => chat_completion_token_logprob.py} | 6 +- ...ms.py => chat_create_completion_params.py} | 36 +- ....py => chat_create_completion_response.py} | 38 +- .../types/genai/__init__.py | 42 + .../types/genai/agent_create_params.py | 39 + .../types/genai/agent_create_response.py | 22 + .../types/genai/agent_delete_response.py | 22 + .../types/genai/agent_list_params.py | 18 + .../types/genai/agent_list_response.py | 198 ++++ .../types/genai/agent_retrieve_response.py | 22 + .../types/genai/agent_update_params.py | 65 ++ .../types/genai/agent_update_response.py | 22 + .../types/genai/agent_update_status_params.py | 16 + .../genai/agent_update_status_response.py | 22 + .../types/genai/agents/__init__.py | 32 + .../genai/agents/api_agent_api_key_info.py | 22 + .../genai/agents/api_key_create_params.py | 15 + .../genai/agents/api_key_create_response.py | 12 + .../genai/agents/api_key_delete_response.py | 12 + .../types/genai/agents/api_key_list_params.py | 15 + .../genai/agents/api_key_list_response.py | 18 
+ .../agents/api_key_regenerate_response.py | 12 + .../genai/agents/api_key_update_params.py | 19 + .../genai/agents/api_key_update_response.py | 12 + .../agents/api_link_knowledge_base_output.py | 22 + .../types/genai/agents/api_links.py | 21 + .../types/genai/agents/api_meta.py | 15 + .../genai/agents/child_agent_add_params.py | 22 + .../genai/agents/child_agent_add_response.py | 14 + .../agents/child_agent_delete_response.py | 13 + .../genai/agents/child_agent_update_params.py | 24 + .../agents/child_agent_update_response.py | 18 + .../genai/agents/child_agent_view_response.py | 22 + .../genai/agents/function_create_params.py | 25 + .../genai/agents/function_create_response.py | 22 + .../genai/agents/function_delete_response.py | 22 + .../genai/agents/function_update_params.py | 29 + .../genai/agents/function_update_response.py | 22 + .../agents/knowledge_base_detach_response.py | 22 + .../types/genai/agents/version_list_params.py | 15 + .../genai/agents/version_list_response.py | 118 +++ .../genai/agents/version_update_params.py | 15 + .../genai/agents/version_update_response.py | 30 + .../types/genai/api_agent.py | 286 ++++++ .../types/genai/api_agreement.py | 17 + .../types/genai/api_deployment_visibility.py | 9 + .../types/genai/api_indexing_job.py | 43 + .../types/genai/api_knowledge_base.py | 37 + .../types/genai/api_model.py | 57 ++ .../types/genai/api_model_version.py | 15 + .../types/genai/api_retrieval_method.py | 13 + .../{fine_tuning => genai/auth}/__init__.py | 0 .../types/genai/auth/agents/__init__.py | 6 + .../genai/auth/agents/token_create_params.py | 13 + .../auth/agents/token_create_response.py | 13 + .../types/genai/indexing_job_create_params.py | 14 + .../genai/indexing_job_create_response.py | 12 + .../types/genai/indexing_job_list_params.py | 15 + .../types/genai/indexing_job_list_response.py | 18 + ...xing_job_retrieve_data_sources_response.py | 52 + .../genai/indexing_job_retrieve_response.py | 12 + .../indexing_job_update_cancel_params.py | 14 + .../indexing_job_update_cancel_response.py | 12 + .../genai/knowledge_base_create_params.py | 64 ++ .../genai/knowledge_base_create_response.py | 12 + .../genai/knowledge_base_delete_response.py | 11 + .../types/genai/knowledge_base_list_params.py | 15 + .../genai/knowledge_base_list_response.py | 18 + .../genai/knowledge_base_retrieve_response.py | 30 + .../genai/knowledge_base_update_params.py | 27 + .../genai/knowledge_base_update_response.py | 12 + .../types/genai/knowledge_bases/__init__.py | 16 + .../api_file_upload_data_source.py | 15 + .../api_file_upload_data_source_param.py | 15 + .../api_knowledge_base_data_source.py | 35 + .../knowledge_bases/api_spaces_data_source.py | 15 + .../api_spaces_data_source_param.py | 15 + .../api_web_crawler_data_source.py | 26 + .../api_web_crawler_data_source_param.py | 25 + .../data_source_create_params.py | 33 + .../data_source_create_response.py | 12 + .../data_source_delete_response.py | 13 + .../data_source_list_params.py | 15 + .../data_source_list_response.py | 18 + .../types/genai/model_list_params.py | 42 + .../types/genai/model_list_response.py | 42 + .../types/genai/models/__init__.py | 13 + .../genai/models/api_key_create_params.py | 11 + .../genai/models/api_key_create_response.py | 12 + .../genai/models/api_key_delete_response.py | 12 + .../types/genai/models/api_key_list_params.py | 15 + .../genai/models/api_key_list_response.py | 18 + .../genai/models/api_key_update_params.py | 15 + .../api_key_update_regenerate_response.py | 12 + 
.../genai/models/api_key_update_response.py | 12 + .../genai/models/api_model_api_key_info.py | 22 + .../providers}/__init__.py | 0 .../genai/providers/anthropic/__init__.py | 15 + .../anthropic/api_anthropic_api_key_info.py | 22 + .../providers/anthropic/key_create_params.py | 13 + .../anthropic/key_create_response.py | 12 + .../anthropic/key_delete_response.py | 12 + .../anthropic/key_list_agents_params.py | 15 + .../anthropic/key_list_agents_response.py | 28 + .../providers/anthropic/key_list_params.py | 15 + .../providers/anthropic/key_list_response.py | 18 + .../anthropic/key_retrieve_response.py | 12 + .../providers/anthropic/key_update_params.py | 17 + .../anthropic/key_update_response.py | 12 + .../types/genai/providers/openai/__init__.py | 15 + .../openai/api_openai_api_key_info.py | 25 + .../providers/openai/key_create_params.py | 13 + .../providers/openai/key_create_response.py | 12 + .../providers/openai/key_delete_response.py | 12 + .../genai/providers/openai/key_list_params.py | 15 + .../providers/openai/key_list_response.py | 18 + .../openai/key_retrieve_agents_params.py | 15 + .../openai/key_retrieve_agents_response.py | 28 + .../providers/openai/key_retrieve_response.py | 12 + .../providers/openai/key_update_params.py | 17 + .../providers/openai/key_update_response.py | 12 + .../types/genai_retrieve_regions_params.py | 15 + .../types/genai_retrieve_regions_response.py | 23 + .../types/stop_configuration_param.py | 10 - .../types/threads/runs/__init__.py | 3 - .../types/vector_stores/__init__.py | 3 - .../api_resources/{chat => genai}/__init__.py | 0 .../api_resources/genai/agents}/__init__.py | 2 - .../genai/agents/test_api_keys.py | 572 +++++++++++ .../genai/agents/test_child_agents.py | 485 +++++++++ .../genai/agents/test_functions.py | 382 +++++++ .../genai/agents/test_knowledge_bases.py | 314 ++++++ .../genai/agents/test_versions.py | 233 +++++ .../api_resources/genai/auth}/__init__.py | 2 - .../genai/auth/agents}/__init__.py | 2 - .../genai/auth/agents/test_token.py | 124 +++ .../genai/knowledge_bases}/__init__.py | 2 - .../knowledge_bases/test_data_sources.py | 374 +++++++ tests/api_resources/genai/models/__init__.py | 1 + .../genai/models/test_api_keys.py | 446 ++++++++ .../api_resources/genai/providers/__init__.py | 1 + .../genai/providers/anthropic/__init__.py | 1 + .../genai/providers/anthropic/test_keys.py | 555 ++++++++++ .../genai/providers/openai/__init__.py | 1 + .../genai/providers/openai/test_keys.py | 555 ++++++++++ tests/api_resources/genai/test_agents.py | 597 +++++++++++ .../api_resources/genai/test_indexing_jobs.py | 446 ++++++++ .../genai/test_knowledge_bases.py | 510 +++++++++ tests/api_resources/genai/test_models.py | 100 ++ .../test_completions.py => test_chat.py} | 62 +- tests/api_resources/test_genai.py | 96 ++ tests/test_client.py | 197 ++-- 208 files changed, 17915 insertions(+), 760 deletions(-) delete mode 100644 .github/workflows/publish-pypi.yml delete mode 100644 .github/workflows/release-doctor.yml delete mode 100644 .release-please-manifest.json delete mode 100644 bin/check-release-environment delete mode 100644 release-please-config.json rename src/digitalocean_genai_sdk/resources/{chat/completions.py => chat.py} (80%) delete mode 100644 src/digitalocean_genai_sdk/resources/chat/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/chat/chat.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/__init__.py create mode 100644 
src/digitalocean_genai_sdk/resources/genai/agents/agents.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/functions.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/agents/versions.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/auth/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/auth/auth.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/genai.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/models/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/models/api_keys.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/models/models.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py create mode 100644 src/digitalocean_genai_sdk/resources/genai/providers/providers.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/response_message.py delete mode 100644 src/digitalocean_genai_sdk/types/chat/usage.py rename src/digitalocean_genai_sdk/types/{chat/request_message_content_part_text_param.py => chat_completion_request_message_content_part_text_param.py} (68%) delete mode 100644 src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py rename src/digitalocean_genai_sdk/types/{chat/token_logprob.py => chat_completion_token_logprob.py} (92%) rename src/digitalocean_genai_sdk/types/{chat/completion_create_params.py => chat_create_completion_params.py} (83%) rename src/digitalocean_genai_sdk/types/{chat/create_response.py => chat_create_completion_response.py} (60%) create mode 100644 src/digitalocean_genai_sdk/types/genai/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_list_response.py create mode 
100644 src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_links.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/api_meta.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_agent.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_agreement.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_indexing_job.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_model.py create mode 100644 
src/digitalocean_genai_sdk/types/genai/api_model_version.py create mode 100644 src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py rename src/digitalocean_genai_sdk/types/{fine_tuning => genai/auth}/__init__.py (100%) create mode 100644 src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/model_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/model_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/__init__.py 
create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py rename src/digitalocean_genai_sdk/types/{fine_tuning/checkpoints => genai/providers}/__init__.py (100%) create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py create mode 100644 src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py create mode 100644 src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/stop_configuration_param.py delete mode 100644 src/digitalocean_genai_sdk/types/threads/runs/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/vector_stores/__init__.py rename tests/api_resources/{chat => genai}/__init__.py (100%) rename {src/digitalocean_genai_sdk/types/organization/projects => tests/api_resources/genai/agents}/__init__.py (70%) create mode 100644 tests/api_resources/genai/agents/test_api_keys.py create mode 100644 tests/api_resources/genai/agents/test_child_agents.py create mode 100644 tests/api_resources/genai/agents/test_functions.py create mode 100644 tests/api_resources/genai/agents/test_knowledge_bases.py create mode 100644 tests/api_resources/genai/agents/test_versions.py rename {src/digitalocean_genai_sdk/types/threads => tests/api_resources/genai/auth}/__init__.py (70%) rename {src/digitalocean_genai_sdk/types/fine_tuning/jobs => tests/api_resources/genai/auth/agents}/__init__.py (70%) create mode 100644 tests/api_resources/genai/auth/agents/test_token.py rename {src/digitalocean_genai_sdk/types/organization => tests/api_resources/genai/knowledge_bases}/__init__.py (70%) create mode 100644 tests/api_resources/genai/knowledge_bases/test_data_sources.py create mode 100644 tests/api_resources/genai/models/__init__.py create mode 100644 tests/api_resources/genai/models/test_api_keys.py create mode 100644 tests/api_resources/genai/providers/__init__.py create mode 100644 tests/api_resources/genai/providers/anthropic/__init__.py create mode 100644 tests/api_resources/genai/providers/anthropic/test_keys.py create mode 100644 tests/api_resources/genai/providers/openai/__init__.py create mode 100644 tests/api_resources/genai/providers/openai/test_keys.py create mode 100644 tests/api_resources/genai/test_agents.py create mode 100644 tests/api_resources/genai/test_indexing_jobs.py create mode 100644 tests/api_resources/genai/test_knowledge_bases.py create mode 100644 tests/api_resources/genai/test_models.py rename tests/api_resources/{chat/test_completions.py => test_chat.py} (65%) create mode 100644 tests/api_resources/test_genai.py diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml deleted file mode 100644 index 2bc5b4b2..00000000 --- a/.github/workflows/publish-pypi.yml +++ /dev/null @@ -1,31 +0,0 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
-# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml -name: Publish PyPI -on: - workflow_dispatch: - - release: - types: [published] - -jobs: - publish: - name: publish - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Install Rye - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml deleted file mode 100644 index 0f23cbc4..00000000 --- a/.github/workflows/release-doctor.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Release Doctor -on: - pull_request: - branches: - - main - workflow_dispatch: - -jobs: - release_doctor: - name: release doctor - runs-on: ubuntu-latest - if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') - - steps: - - uses: actions/checkout@v4 - - - name: Check release environment - run: | - bash ./bin/check-release-environment - env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json deleted file mode 100644 index c4762802..00000000 --- a/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - ".": "0.0.1-alpha.0" -} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 6b91fe37..ae35d0c6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 4 +configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 2da74b81015f4ef6cad3a0bcb9025834 +config_hash: a81446c4c386cd3e02581510d751ebe5 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d5d60a7..4e0d206f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/genai-python.git +$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up.
### Publish manually diff --git a/LICENSE b/LICENSE index 9c99266b..0c1fe1d5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,201 @@ -Copyright 2025 digitalocean-genai-sdk + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + 1. Definitions. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Digitalocean Genai SDK + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index bdaea964..8ea43f55 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Digitalocean Genai SDK Python API library -[![PyPI version](https://img.shields.io/pypi/v/do-genai.svg)](https://pypi.org/project/do-genai/) +[![PyPI version](https://img.shields.io/pypi/v/digitalocean_genai_sdk.svg)](https://pypi.org/project/digitalocean_genai_sdk/) The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, @@ -10,15 +10,18 @@ It is generated with [Stainless](https://www.stainless.com/). ## Documentation -The REST API documentation can be found on [help.openai.com](https://help.openai.com/). The full API of this library can be found in [api.md](api.md). +The REST API documentation can be found on [developers.digitalocean.com](https://developers.digitalocean.com/documentation/v2/). 
The full API of this library can be found in [api.md](api.md). ## Installation ```sh -# install from PyPI -pip install --pre do-genai +# install from this staging repo +pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git ``` +> [!NOTE] +> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk` + ## Usage The full API of this library can be found in [api.md](api.md). @@ -33,16 +36,10 @@ client = DigitaloceanGenaiSDK( ), # This is the default and can be omitted ) -create_response = client.chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", +versions = client.genai.agents.versions.list( + uuid="REPLACE_ME", ) -print(create_response.id) +print(versions.agent_versions) ``` While you can provide an `api_key` keyword argument, @@ -67,16 +64,10 @@ client = AsyncDigitaloceanGenaiSDK( async def main() -> None: - create_response = await client.chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", + versions = await client.genai.agents.versions.list( + uuid="REPLACE_ME", ) - print(create_response.id) + print(versions.agent_versions) asyncio.run(main()) @@ -102,17 +93,11 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -create_response = client.chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - stream_options={}, +data_source = client.genai.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={}, ) -print(create_response.stream_options) +print(data_source.aws_data_source) ``` ## Handling errors @@ -131,14 +116,8 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() try: - client.chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", + client.genai.agents.versions.list( + uuid="REPLACE_ME", ) except digitalocean_genai_sdk.APIConnectionError as e: print("The server could not be reached") @@ -182,14 +161,8 @@ client = DigitaloceanGenaiSDK( ) # Or, configure per-request: -client.with_options(max_retries=5).chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", +client.with_options(max_retries=5).genai.agents.versions.list( + uuid="REPLACE_ME", ) ``` @@ -213,14 +186,8 @@ client = DigitaloceanGenaiSDK( ) # Override per-request: -client.with_options(timeout=5.0).chat.completions.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", +client.with_options(timeout=5.0).genai.agents.versions.list( + uuid="REPLACE_ME", ) ``` @@ -262,22 +229,18 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -response = client.chat.completions.with_raw_response.create( - messages=[{ - "content": "string", - "role": "system", - }], - model="llama3-8b-instruct", +response = client.genai.agents.versions.with_raw_response.list( + uuid="REPLACE_ME", ) print(response.headers.get('X-My-Header')) -completion = response.parse() # get the object that `chat.completions.create()` would have returned -print(completion.id) +version = response.parse() # get the object that 
`genai.agents.versions.list()` would have returned +print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. +These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -286,14 +249,8 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. ```python -with client.chat.completions.with_streaming_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", +with client.genai.agents.versions.with_streaming_response.list( + uuid="REPLACE_ME", ) as response: print(response.headers.get("X-My-Header")) @@ -389,7 +346,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions. 
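The per-request options shown above compose. A minimal sketch, assuming `with_options` accepts both settings together (each is demonstrated separately in this README) and reusing the placeholder `REPLACE_ME` UUID:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # api_key falls back to the environment, as shown earlier

# Combine per-request overrides on a single call; each can also be set
# client-wide via the constructor arguments shown above.
versions = client.with_options(max_retries=5, timeout=5.0).genai.agents.versions.list(
    uuid="REPLACE_ME",  # placeholder agent UUID
)
print(versions.agent_versions)
```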
### Determining the installed version diff --git a/api.md b/api.md index 90a1a7d9..c65f46b7 100644 --- a/api.md +++ b/api.md @@ -1,32 +1,330 @@ -# Chat +# Genai + +Types: + +```python +from digitalocean_genai_sdk.types import GenaiRetrieveRegionsResponse +``` + +Methods: + +- client.genai.retrieve_regions(\*\*params) -> GenaiRetrieveRegionsResponse + +## Agents -## Completions +Types: + +```python +from digitalocean_genai_sdk.types.genai import ( + APIAgent, + APIDeploymentVisibility, + APIModel, + APIRetrievalMethod, + AgentCreateResponse, + AgentRetrieveResponse, + AgentUpdateResponse, + AgentListResponse, + AgentDeleteResponse, + AgentUpdateStatusResponse, +) +``` + +Methods: + +- client.genai.agents.create(\*\*params) -> AgentCreateResponse +- client.genai.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.genai.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.genai.agents.list(\*\*params) -> AgentListResponse +- client.genai.agents.delete(uuid) -> AgentDeleteResponse +- client.genai.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse + +### APIKeys Types: ```python -from digitalocean_genai_sdk.types.chat import ( - CreateModelProperties, - CreateResponse, - RequestMessageContentPartText, - ResponseMessage, - TokenLogprob, - Usage, +from digitalocean_genai_sdk.types.genai.agents import ( + APIAgentAPIKeyInfo, + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, + APIKeyRegenerateResponse, ) ``` Methods: -- client.chat.completions.create(\*\*params) -> CreateResponse +- client.genai.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.genai.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.genai.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.genai.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.genai.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse -# Completions +### Functions Types: ```python -from digitalocean_genai_sdk.types import ChatCompletionStreamOptions, StopConfiguration +from digitalocean_genai_sdk.types.genai.agents import ( + FunctionCreateResponse, + FunctionUpdateResponse, + FunctionDeleteResponse, +) ``` +Methods: + +- client.genai.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.genai.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.genai.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse + +### Versions + +Types: + +```python +from digitalocean_genai_sdk.types.genai.agents import ( + APILinks, + APIMeta, + VersionUpdateResponse, + VersionListResponse, +) +``` + +Methods: + +- client.genai.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.genai.agents.versions.list(uuid, \*\*params) -> VersionListResponse + +### KnowledgeBases + +Types: + +```python +from digitalocean_genai_sdk.types.genai.agents import ( + APILinkKnowledgeBaseOutput, + KnowledgeBaseDetachResponse, +) +``` + +Methods: + +- client.genai.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.genai.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.genai.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse + +### ChildAgents + 
+Types: + +```python +from digitalocean_genai_sdk.types.genai.agents import ( + ChildAgentUpdateResponse, + ChildAgentDeleteResponse, + ChildAgentAddResponse, + ChildAgentViewResponse, +) +``` + +Methods: + +- client.genai.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.genai.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.genai.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.genai.agents.child_agents.view(uuid) -> ChildAgentViewResponse + +## Providers + +### Anthropic + +#### Keys + +Types: + +```python +from digitalocean_genai_sdk.types.genai.providers.anthropic import ( + APIAnthropicAPIKeyInfo, + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.genai.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.genai.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.genai.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.genai.providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.genai.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.genai.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + +### OpenAI + +#### Keys + +Types: + +```python +from digitalocean_genai_sdk.types.genai.providers.openai import ( + APIOpenAIAPIKeyInfo, + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyRetrieveAgentsResponse, +) +``` + +Methods: + +- client.genai.providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.genai.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.genai.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.genai.providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.genai.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.genai.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse + +## Auth + +### Agents + +#### Token + +Types: + +```python +from digitalocean_genai_sdk.types.genai.auth.agents import TokenCreateResponse +``` + +Methods: + +- client.genai.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse + +## IndexingJobs + +Types: + +```python +from digitalocean_genai_sdk.types.genai import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.genai.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.genai.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.genai.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.genai.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.genai.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + +## KnowledgeBases + +Types: + +```python +from digitalocean_genai_sdk.types.genai import ( + APIKnowledgeBase, + KnowledgeBaseCreateResponse, + KnowledgeBaseRetrieveResponse, + KnowledgeBaseUpdateResponse, + KnowledgeBaseListResponse, + 
KnowledgeBaseDeleteResponse, +) +``` + +Methods: + +- client.genai.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.genai.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.genai.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.genai.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.genai.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse + +### DataSources + +Types: + +```python +from digitalocean_genai_sdk.types.genai.knowledge_bases import ( + APIFileUploadDataSource, + APIKnowledgeBaseDataSource, + APISpacesDataSource, + APIWebCrawlerDataSource, + DataSourceCreateResponse, + DataSourceListResponse, + DataSourceDeleteResponse, +) +``` + +Methods: + +- client.genai.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.genai.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.genai.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse + +## Models + +Types: + +```python +from digitalocean_genai_sdk.types.genai import APIAgreement, APIModelVersion, ModelListResponse +``` + +Methods: + +- client.genai.models.list(\*\*params) -> ModelListResponse + +### APIKeys + +Types: + +```python +from digitalocean_genai_sdk.types.genai.models import ( + APIModelAPIKeyInfo, + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, + APIKeyUpdateRegenerateResponse, +) +``` + +Methods: + +- client.genai.models.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.genai.models.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.genai.models.api_keys.list(\*\*params) -> APIKeyListResponse +- client.genai.models.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.genai.models.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse + +# Chat + +Types: + +```python +from digitalocean_genai_sdk.types import ( + ChatCompletionRequestMessageContentPartText, + ChatCompletionTokenLogprob, + ChatCreateCompletionResponse, +) +``` + +Methods: + +- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse + # Embeddings Types: @@ -51,11 +349,3 @@ Methods: - client.models.retrieve(model) -> Model - client.models.list() -> ModelListResponse - -# Responses - -Types: - -```python -from digitalocean_genai_sdk.types import ModelResponseProperties -``` diff --git a/bin/check-release-environment b/bin/check-release-environment deleted file mode 100644 index 9e89a88a..00000000 --- a/bin/check-release-environment +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -errors=() - -if [ -z "${PYPI_TOKEN}" ]; then - errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") -fi - -lenErrors=${#errors[@]} - -if [[ lenErrors -gt 0 ]]; then - echo -e "Found the following errors in the release environment:\n" - - for error in "${errors[@]}"; do - echo -e "- $error\n" - done - - exit 1 -fi - -echo "The environment is ready to push releases!" 
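To make the reshaped api.md surface concrete, here is a minimal sketch tying a few of the methods above together, assuming the optional list params can be omitted. The UUID is a placeholder, and the model id is reused from the pre-rename README examples rather than taken from the spec:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

# The former `chat.completions.create()` is now the flattened
# `chat.create_completion()` method.
completion = client.chat.create_completion(
    messages=[{"content": "Hello!", "role": "user"}],
    model="llama3-8b-instruct",  # placeholder model id from the old README examples
)

# The new genai resources follow the same list/retrieve/create pattern.
models = client.genai.models.list()
data_sources = client.genai.knowledge_bases.data_sources.list(
    knowledge_base_uuid="REPLACE_ME",  # placeholder knowledge base UUID
)
```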
diff --git a/pyproject.toml b/pyproject.toml index a268c8cf..73fa240b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,9 @@ [project] -name = "do-genai" +name = "digitalocean_genai_sdk" version = "0.0.1-alpha.0" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] -license = "MIT" +license = "Apache-2.0" authors = [ { name = "Digitalocean Genai SDK", email = "" }, ] @@ -30,12 +30,12 @@ classifiers = [ "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Topic :: Software Development :: Libraries :: Python Modules", - "License :: OSI Approved :: MIT License" + "License :: OSI Approved :: Apache Software License" ] [project.urls] -Homepage = "https://github.com/digitalocean/genai-python" -Repository = "https://github.com/digitalocean/genai-python" +Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" +Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" [tool.rye] @@ -121,7 +121,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/release-please-config.json b/release-please-config.json deleted file mode 100644 index 234b9475..00000000 --- a/release-please-config.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "packages": { - ".": {} - }, - "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", - "include-v-in-tag": true, - "include-component-in-tag": false, - "versioning": "prerelease", - "prerelease": true, - "bump-minor-pre-major": true, - "bump-patch-for-minor-pre-major": false, - "pull-request-header": "Automated Release PR", - "pull-request-title-pattern": "release: ${version}", - "changelog-sections": [ - { - "type": "feat", - "section": "Features" - }, - { - "type": "fix", - "section": "Bug Fixes" - }, - { - "type": "perf", - "section": "Performance Improvements" - }, - { - "type": "revert", - "section": "Reverts" - }, - { - "type": "chore", - "section": "Chores" - }, - { - "type": "docs", - "section": "Documentation" - }, - { - "type": "style", - "section": "Styles" - }, - { - "type": "refactor", - "section": "Refactors" - }, - { - "type": "test", - "section": "Tests", - "hidden": true - }, - { - "type": "build", - "section": "Build System" - }, - { - "type": "ci", - "section": "Continuous Integration", - "hidden": true - } - ], - "release-type": "python", - "extra-files": [ - "src/digitalocean_genai_sdk/_version.py" - ] -} \ No newline at end of file diff --git a/requirements-dev.lock b/requirements-dev.lock index 8a2680e6..bf449af3 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,7 +13,7 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-genai + # via digitalocean-genai-sdk # via httpx argcomplete==3.1.2 # via nox @@ -26,7 +26,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via do-genai + # via digitalocean-genai-sdk exceptiongroup==1.2.2 # via anyio # via pytest @@ -37,7 +37,7 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via do-genai + # via digitalocean-genai-sdk # via respx idna==3.4 # via anyio @@ -64,7 +64,7 @@ platformdirs==3.11.0 pluggy==1.5.0 # via pytest 
pydantic==2.10.3 - # via do-genai + # via digitalocean-genai-sdk pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -86,14 +86,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via do-genai + # via digitalocean-genai-sdk time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via do-genai + # via digitalocean-genai-sdk # via mypy # via pydantic # via pydantic-core diff --git a/requirements.lock b/requirements.lock index 832a9acd..e655776d 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,13 +13,13 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-genai + # via digitalocean-genai-sdk # via httpx certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via do-genai + # via digitalocean-genai-sdk exceptiongroup==1.2.2 # via anyio h11==0.14.0 @@ -27,19 +27,19 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via do-genai + # via digitalocean-genai-sdk idna==3.4 # via anyio # via httpx pydantic==2.10.3 - # via do-genai + # via digitalocean-genai-sdk pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via do-genai + # via digitalocean-genai-sdk typing-extensions==4.12.2 # via anyio - # via do-genai + # via digitalocean-genai-sdk # via pydantic # via pydantic-core diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py index 8a0fb675..3ce8b62f 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/digitalocean_genai_sdk/_client.py @@ -21,7 +21,7 @@ ) from ._utils import is_given, get_async_library from ._version import __version__ -from .resources import models, embeddings +from .resources import chat, models, embeddings from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError from ._base_client import ( @@ -29,7 +29,7 @@ SyncAPIClient, AsyncAPIClient, ) -from .resources.chat import chat +from .resources.genai import genai __all__ = [ "Timeout", @@ -44,6 +44,7 @@ class DigitaloceanGenaiSDK(SyncAPIClient): + genai: genai.GenaiResource chat: chat.ChatResource embeddings: embeddings.EmbeddingsResource models: models.ModelsResource @@ -91,7 +92,7 @@ def __init__( if base_url is None: base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") if base_url is None: - base_url = f"https://api.example.com" + base_url = f"https://api.digitalocean.com/" super().__init__( version=__version__, @@ -104,6 +105,7 @@ def __init__( _strict_response_validation=_strict_response_validation, ) + self.genai = genai.GenaiResource(self) self.chat = chat.ChatResource(self) self.embeddings = embeddings.EmbeddingsResource(self) self.models = models.ModelsResource(self) @@ -216,6 +218,7 @@ def _make_status_error( class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): + genai: genai.AsyncGenaiResource chat: chat.AsyncChatResource embeddings: embeddings.AsyncEmbeddingsResource models: models.AsyncModelsResource @@ -263,7 +266,7 @@ def __init__( if base_url is None: base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") if base_url is None: - base_url = f"https://api.example.com" + base_url = f"https://api.digitalocean.com/" super().__init__( version=__version__, @@ -276,6 +279,7 @@ def __init__( _strict_response_validation=_strict_response_validation, ) + self.genai = genai.AsyncGenaiResource(self) self.chat = chat.AsyncChatResource(self) self.embeddings = embeddings.AsyncEmbeddingsResource(self) self.models = models.AsyncModelsResource(self) @@ -389,6 +393,7 @@ def 
_make_status_error( class DigitaloceanGenaiSDKWithRawResponse: def __init__(self, client: DigitaloceanGenaiSDK) -> None: + self.genai = genai.GenaiResourceWithRawResponse(client.genai) self.chat = chat.ChatResourceWithRawResponse(client.chat) self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings) self.models = models.ModelsResourceWithRawResponse(client.models) @@ -396,6 +401,7 @@ def __init__(self, client: DigitaloceanGenaiSDK) -> None: class AsyncDigitaloceanGenaiSDKWithRawResponse: def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + self.genai = genai.AsyncGenaiResourceWithRawResponse(client.genai) self.chat = chat.AsyncChatResourceWithRawResponse(client.chat) self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings) self.models = models.AsyncModelsResourceWithRawResponse(client.models) @@ -403,6 +409,7 @@ def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: class DigitaloceanGenaiSDKWithStreamedResponse: def __init__(self, client: DigitaloceanGenaiSDK) -> None: + self.genai = genai.GenaiResourceWithStreamingResponse(client.genai) self.chat = chat.ChatResourceWithStreamingResponse(client.chat) self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings) self.models = models.ModelsResourceWithStreamingResponse(client.models) @@ -410,6 +417,7 @@ def __init__(self, client: DigitaloceanGenaiSDK) -> None: class AsyncDigitaloceanGenaiSDKWithStreamedResponse: def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + self.genai = genai.AsyncGenaiResourceWithStreamingResponse(client.genai) self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat) self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings) self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index bb83d491..5c4fa53a 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "digitalocean_genai_sdk" -__version__ = "0.0.1-alpha.0" # x-release-please-version +__version__ = "0.0.1-alpha.0" diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py index c9177434..c4e2a67e 100644 --- a/src/digitalocean_genai_sdk/resources/__init__.py +++ b/src/digitalocean_genai_sdk/resources/__init__.py @@ -8,6 +8,14 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) +from .genai import ( + GenaiResource, + AsyncGenaiResource, + GenaiResourceWithRawResponse, + AsyncGenaiResourceWithRawResponse, + GenaiResourceWithStreamingResponse, + AsyncGenaiResourceWithStreamingResponse, +) from .models import ( ModelsResource, AsyncModelsResource, @@ -26,6 +34,12 @@ ) __all__ = [ + "GenaiResource", + "AsyncGenaiResource", + "GenaiResourceWithRawResponse", + "AsyncGenaiResourceWithRawResponse", + "GenaiResourceWithStreamingResponse", + "AsyncGenaiResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", diff --git a/src/digitalocean_genai_sdk/resources/chat/completions.py b/src/digitalocean_genai_sdk/resources/chat.py similarity index 80% rename from src/digitalocean_genai_sdk/resources/chat/completions.py rename to src/digitalocean_genai_sdk/resources/chat.py index effaf077..e193c696 100644 --- a/src/digitalocean_genai_sdk/resources/chat/completions.py +++ b/src/digitalocean_genai_sdk/resources/chat.py @@ -2,53 +2,51 @@ from __future__ import annotations -from typing import Dict, Iterable, Optional +from typing import Dict, List, Union, Iterable, Optional import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import chat_create_completion_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...types.chat import completion_create_params -from ..._base_client import make_request_options -from ...types.chat.create_response import CreateResponse -from ...types.stop_configuration_param import StopConfigurationParam -from ...types.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from .._base_client import make_request_options +from ..types.chat_create_completion_response import ChatCreateCompletionResponse -__all__ = ["CompletionsResource", "AsyncCompletionsResource"] +__all__ = ["ChatResource", "AsyncChatResource"] -class CompletionsResource(SyncAPIResource): +class ChatResource(SyncAPIResource): @cached_property - def with_raw_response(self) -> CompletionsResourceWithRawResponse: + def with_raw_response(self) -> ChatResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ - return CompletionsResourceWithRawResponse(self) + return ChatResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ - return CompletionsResourceWithStreamingResponse(self) + return ChatResourceWithStreamingResponse(self) - def create( + def create_completion( self, *, - messages: Iterable[completion_create_params.Message], + messages: Iterable[chat_create_completion_params.Message], model: str, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, @@ -58,9 +56,9 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, @@ -71,7 +69,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: + ) -> ChatCreateCompletionResponse: """ Creates a model response for the given chat conversation. @@ -177,39 +175,39 @@ def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + chat_create_completion_params.ChatCreateCompletionParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=CreateResponse, + cast_to=ChatCreateCompletionResponse, ) -class AsyncCompletionsResource(AsyncAPIResource): +class AsyncChatResource(AsyncAPIResource): @cached_property - def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: + def with_raw_response(self) -> AsyncChatResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ - return AsyncCompletionsResourceWithRawResponse(self) + return AsyncChatResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ - return AsyncCompletionsResourceWithStreamingResponse(self) + return AsyncChatResourceWithStreamingResponse(self) - async def create( + async def create_completion( self, *, - messages: Iterable[completion_create_params.Message], + messages: Iterable[chat_create_completion_params.Message], model: str, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, @@ -219,9 +217,9 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Optional[StopConfigurationParam] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, @@ -232,7 +230,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CreateResponse: + ) -> ChatCreateCompletionResponse: """ Creates a model response for the given chat conversation. 
@@ -338,46 +336,46 @@ async def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + chat_create_completion_params.ChatCreateCompletionParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=CreateResponse, + cast_to=ChatCreateCompletionResponse, ) -class CompletionsResourceWithRawResponse: - def __init__(self, completions: CompletionsResource) -> None: - self._completions = completions +class ChatResourceWithRawResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat - self.create = to_raw_response_wrapper( - completions.create, + self.create_completion = to_raw_response_wrapper( + chat.create_completion, ) -class AsyncCompletionsResourceWithRawResponse: - def __init__(self, completions: AsyncCompletionsResource) -> None: - self._completions = completions +class AsyncChatResourceWithRawResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat - self.create = async_to_raw_response_wrapper( - completions.create, + self.create_completion = async_to_raw_response_wrapper( + chat.create_completion, ) -class CompletionsResourceWithStreamingResponse: - def __init__(self, completions: CompletionsResource) -> None: - self._completions = completions +class ChatResourceWithStreamingResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat - self.create = to_streamed_response_wrapper( - completions.create, + self.create_completion = to_streamed_response_wrapper( + chat.create_completion, ) -class AsyncCompletionsResourceWithStreamingResponse: - def __init__(self, completions: AsyncCompletionsResource) -> None: - self._completions = completions +class AsyncChatResourceWithStreamingResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat - self.create = async_to_streamed_response_wrapper( - completions.create, + self.create_completion = async_to_streamed_response_wrapper( + chat.create_completion, ) diff --git a/src/digitalocean_genai_sdk/resources/chat/__init__.py b/src/digitalocean_genai_sdk/resources/chat/__init__.py deleted file mode 100644 index ec960eb4..00000000 --- a/src/digitalocean_genai_sdk/resources/chat/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .chat import ( - ChatResource, - AsyncChatResource, - ChatResourceWithRawResponse, - AsyncChatResourceWithRawResponse, - ChatResourceWithStreamingResponse, - AsyncChatResourceWithStreamingResponse, -) -from .completions import ( - CompletionsResource, - AsyncCompletionsResource, - CompletionsResourceWithRawResponse, - AsyncCompletionsResourceWithRawResponse, - CompletionsResourceWithStreamingResponse, - AsyncCompletionsResourceWithStreamingResponse, -) - -__all__ = [ - "CompletionsResource", - "AsyncCompletionsResource", - "CompletionsResourceWithRawResponse", - "AsyncCompletionsResourceWithRawResponse", - "CompletionsResourceWithStreamingResponse", - "AsyncCompletionsResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/chat/chat.py b/src/digitalocean_genai_sdk/resources/chat/chat.py deleted file mode 100644 index ac19d849..00000000 --- a/src/digitalocean_genai_sdk/resources/chat/chat.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .completions import ( - CompletionsResource, - AsyncCompletionsResource, - CompletionsResourceWithRawResponse, - AsyncCompletionsResourceWithRawResponse, - CompletionsResourceWithStreamingResponse, - AsyncCompletionsResourceWithStreamingResponse, -) - -__all__ = ["ChatResource", "AsyncChatResource"] - - -class ChatResource(SyncAPIResource): - @cached_property - def completions(self) -> CompletionsResource: - return CompletionsResource(self._client) - - @cached_property - def with_raw_response(self) -> ChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ChatResourceWithStreamingResponse(self) - - -class AsyncChatResource(AsyncAPIResource): - @cached_property - def completions(self) -> AsyncCompletionsResource: - return AsyncCompletionsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncChatResourceWithStreamingResponse(self) - - -class ChatResourceWithRawResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - @cached_property - def completions(self) -> CompletionsResourceWithRawResponse: - return CompletionsResourceWithRawResponse(self._chat.completions) - - -class AsyncChatResourceWithRawResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - @cached_property - def completions(self) -> AsyncCompletionsResourceWithRawResponse: - return AsyncCompletionsResourceWithRawResponse(self._chat.completions) - - -class ChatResourceWithStreamingResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - @cached_property - def completions(self) -> CompletionsResourceWithStreamingResponse: - return CompletionsResourceWithStreamingResponse(self._chat.completions) - - -class AsyncChatResourceWithStreamingResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - @cached_property - def completions(self) -> AsyncCompletionsResourceWithStreamingResponse: - return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py index 1bcd3145..95146443 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/digitalocean_genai_sdk/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/genai/__init__.py b/src/digitalocean_genai_sdk/resources/genai/__init__.py new file mode 100644 index 00000000..010339f5 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/__init__.py @@ -0,0 +1,103 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) +from .genai import ( + GenaiResource, + AsyncGenaiResource, + GenaiResourceWithRawResponse, + AsyncGenaiResourceWithRawResponse, + GenaiResourceWithStreamingResponse, + AsyncGenaiResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + 
"ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", + "GenaiResource", + "AsyncGenaiResource", + "GenaiResourceWithRawResponse", + "AsyncGenaiResourceWithRawResponse", + "GenaiResourceWithStreamingResponse", + "AsyncGenaiResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/__init__.py b/src/digitalocean_genai_sdk/resources/genai/agents/__init__.py new file mode 100644 index 00000000..f41a0408 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/__init__.py @@ -0,0 +1,89 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", + "FunctionsResource", + "AsyncFunctionsResource", + "FunctionsResourceWithRawResponse", + "AsyncFunctionsResourceWithRawResponse", + "FunctionsResourceWithStreamingResponse", + "AsyncFunctionsResourceWithStreamingResponse", + "VersionsResource", + "AsyncVersionsResource", + "VersionsResourceWithRawResponse", + "AsyncVersionsResourceWithRawResponse", + "VersionsResourceWithStreamingResponse", + "AsyncVersionsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", + "ChildAgentsResource", + "AsyncChildAgentsResource", + "ChildAgentsResourceWithRawResponse", + "AsyncChildAgentsResourceWithRawResponse", + "ChildAgentsResourceWithStreamingResponse", + "AsyncChildAgentsResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", +] diff --git 
a/src/digitalocean_genai_sdk/resources/genai/agents/agents.py b/src/digitalocean_genai_sdk/resources/genai/agents/agents.py new file mode 100644 index 00000000..1b209988 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/agents.py @@ -0,0 +1,965 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List + +import httpx + +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) +from ....types.genai import ( + APIRetrievalMethod, + APIDeploymentVisibility, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, +) +from ...._base_client import make_request_options +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) +from ....types.genai.agent_list_response import AgentListResponse +from ....types.genai.api_retrieval_method import APIRetrievalMethod +from ....types.genai.agent_create_response import AgentCreateResponse +from ....types.genai.agent_delete_response import AgentDeleteResponse +from ....types.genai.agent_update_response import AgentUpdateResponse +from ....types.genai.agent_retrieve_response import AgentRetrieveResponse +from ....types.genai.api_deployment_visibility import APIDeploymentVisibility +from ....types.genai.agent_update_status_response import AgentUpdateStatusResponse + +__all__ = ["AgentsResource", "AsyncAgentsResource"] + + +class AgentsResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + + @cached_property + def functions(self) -> FunctionsResource: + return FunctionsResource(self._client) + + @cached_property + def versions(self) -> VersionsResource: + return VersionsResource(self._client) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + return KnowledgeBasesResource(self._client) + + @cached_property + def child_agents(self) -> ChildAgentsResource: + return ChildAgentsResource(self._client) + + @cached_property + 
def with_raw_response(self) -> AgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AgentsResourceWithStreamingResponse(self) + + def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/genai/agents", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`.
+ + The + response body is a JSON object containing the agent. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/genai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/genai/agents/{path_uuid}", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + + def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/genai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + + def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
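+
+        Example (an editorial sketch, not part of the generated spec; assumes a
+        configured sync client exposed as `client` with this resource reachable
+        at `client.genai.agents`, and a placeholder UUID):
+
+            deleted = client.genai.agents.delete("agent-uuid")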
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._delete( + f"/v2/genai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/genai/agents/{path_uuid}/deployment_visibility", + body=maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + + +class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + + @cached_property + def functions(self) -> AsyncFunctionsResource: + return AsyncFunctionsResource(self._client) + + @cached_property + def versions(self) -> AsyncVersionsResource: + return AsyncVersionsResource(self._client) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + return AsyncKnowledgeBasesResource(self._client) + + @cached_property + def child_agents(self) -> AsyncChildAgentsResource: + return AsyncChildAgentsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
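+
+        A rough usage sketch (editorial; assumes an async client named `client`
+        and that the streamed response is used as an async context manager,
+        mirroring the sync variant):
+
+            async with client.genai.agents.with_streaming_response.list() as response:
+                print(response.headers)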
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAgentsResourceWithStreamingResponse(self) + + async def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/genai/agents", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + async def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent.
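+
+        Example (editorial sketch; `client` is an assumed, already-configured
+        async client and the UUID is a placeholder):
+
+            agent = await client.genai.agents.retrieve("agent-uuid")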
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/genai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + async def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/genai/agents/{path_uuid}", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + + async def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/genai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + + async def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
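+
+        Example (editorial sketch; assumes an async client named `client`
+        awaited inside a running event loop):
+
+            deleted = await client.genai.agents.delete("agent-uuid")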
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/genai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + async def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/genai/agents/{path_uuid}/deployment_visibility", + body=await async_maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + + +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + self.create = to_raw_response_wrapper( + agents.create, + ) + self.retrieve = to_raw_response_wrapper( + agents.retrieve, + ) + self.update = to_raw_response_wrapper( + agents.update, + ) + self.list = to_raw_response_wrapper( + agents.list, + ) + self.delete = to_raw_response_wrapper( + agents.delete, + ) + self.update_status = to_raw_response_wrapper( + agents.update_status, + ) + + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._agents.api_keys) + + @cached_property + def functions(self) -> FunctionsResourceWithRawResponse: + return FunctionsResourceWithRawResponse(self._agents.functions) + + @cached_property + def versions(self) -> VersionsResourceWithRawResponse: + return VersionsResourceWithRawResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: + return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> ChildAgentsResourceWithRawResponse: + return ChildAgentsResourceWithRawResponse(self._agents.child_agents) + + +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + self.create = 
async_to_raw_response_wrapper( + agents.create, + ) + self.retrieve = async_to_raw_response_wrapper( + agents.retrieve, + ) + self.update = async_to_raw_response_wrapper( + agents.update, + ) + self.list = async_to_raw_response_wrapper( + agents.list, + ) + self.delete = async_to_raw_response_wrapper( + agents.delete, + ) + self.update_status = async_to_raw_response_wrapper( + agents.update_status, + ) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + + @cached_property + def functions(self) -> AsyncFunctionsResourceWithRawResponse: + return AsyncFunctionsResourceWithRawResponse(self._agents.functions) + + @cached_property + def versions(self) -> AsyncVersionsResourceWithRawResponse: + return AsyncVersionsResourceWithRawResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: + return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) + + +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + self.create = to_streamed_response_wrapper( + agents.create, + ) + self.retrieve = to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = to_streamed_response_wrapper( + agents.update, + ) + self.list = to_streamed_response_wrapper( + agents.list, + ) + self.delete = to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = to_streamed_response_wrapper( + agents.update_status, + ) + + @cached_property + def api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + + @cached_property + def functions(self) -> FunctionsResourceWithStreamingResponse: + return FunctionsResourceWithStreamingResponse(self._agents.functions) + + @cached_property + def versions(self) -> VersionsResourceWithStreamingResponse: + return VersionsResourceWithStreamingResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: + return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: + return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + + +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + self.create = async_to_streamed_response_wrapper( + agents.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + agents.update, + ) + self.list = async_to_streamed_response_wrapper( + agents.list, + ) + self.delete = async_to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = async_to_streamed_response_wrapper( + agents.update_status, + ) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + + @cached_property + def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: + return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) + + @cached_property + def versions(self) -> 
AsyncVersionsResourceWithStreamingResponse: + return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: + return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py b/src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py new file mode 100644 index 00000000..940be6d7 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py @@ -0,0 +1,581 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.agents import api_key_list_params, api_key_create_params, api_key_update_params +from ....types.genai.agents.api_key_list_response import APIKeyListResponse +from ....types.genai.agents.api_key_create_response import APIKeyCreateResponse +from ....types.genai.agents.api_key_delete_response import APIKeyDeleteResponse +from ....types.genai.agents.api_key_update_response import APIKeyUpdateResponse +from ....types.genai.agents.api_key_regenerate_response import APIKeyRegenerateResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. 
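+
+        Example (editorial sketch; assumes the resource is exposed as
+        `client.genai.agents.api_keys` on a configured sync client, with
+        placeholder identifiers):
+
+            key = client.genai.agents.api_keys.create(
+                "agent-uuid",
+                name="my-agent-key",
+            )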
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/genai/agents/{path_agent_uuid}/api_keys", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._get( + f"/v2/genai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + def regenerate( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. 
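+
+        Example (editorial sketch; placeholder UUIDs, sync client assumed):
+
+            key = client.genai.agents.api_keys.regenerate(
+                "api-key-uuid",
+                agent_uuid="agent-uuid",
+            )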
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/genai/agents/{path_agent_uuid}/api_keys", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + async def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._get( + f"/v2/genai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
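+
+        Example (editorial sketch; assumes an async client named `client` and
+        placeholder UUIDs):
+
+            deleted = await client.genai.agents.api_keys.delete(
+                "api-key-uuid",
+                agent_uuid="agent-uuid",
+            )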
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + async def regenerate( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) + self.regenerate = to_raw_response_wrapper( + api_keys.regenerate, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) + self.regenerate = async_to_raw_response_wrapper( + api_keys.regenerate, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.update = to_streamed_response_wrapper( + api_keys.update, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) + self.regenerate = 
to_streamed_response_wrapper( + api_keys.regenerate, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) + self.regenerate = async_to_streamed_response_wrapper( + api_keys.regenerate, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py b/src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py new file mode 100644 index 00000000..75fb267e --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py @@ -0,0 +1,508 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.agents import child_agent_add_params, child_agent_update_params +from ....types.genai.agents.child_agent_add_response import ChildAgentAddResponse +from ....types.genai.agents.child_agent_view_response import ChildAgentViewResponse +from ....types.genai.agents.child_agent_delete_response import ChildAgentDeleteResponse +from ....types.genai.agents.child_agent_update_response import ChildAgentUpdateResponse + +__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] + + +class ChildAgentsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ChildAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ChildAgentsResourceWithStreamingResponse(self) + + def update( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return self._put( + f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return self._delete( + f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentAddResponse: + """ + To add an agent route to an agent, send a POST request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return self._post( + f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + }, + child_agent_add_params.ChildAgentAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentAddResponse, + ) + + def view( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentViewResponse: + """ + To view agent routes for an agent, send a GET request to + `/v2/gen-ai/agents/{uuid}/child_agents`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/genai/agents/{uuid}/child_agents", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentViewResponse, + ) + + +class AsyncChildAgentsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncChildAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncChildAgentsResourceWithStreamingResponse(self) + + async def update( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return await self._put( + f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=await async_maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + async def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return await self._delete( + f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + async def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentAddResponse: + """ + To add an agent route to an agent, send a POST request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return await self._post( + f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=await async_maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + }, + child_agent_add_params.ChildAgentAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentAddResponse, + ) + + async def view( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentViewResponse: + """ + To view agent routes for an agent, send a GET request to + `/v2/gen-ai/agents/{uuid}/child_agents`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/genai/agents/{uuid}/child_agents", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentViewResponse, + ) + + +class ChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_raw_response_wrapper( + child_agents.update, + ) + self.delete = to_raw_response_wrapper( + child_agents.delete, + ) + self.add = to_raw_response_wrapper( + child_agents.add, + ) + self.view = to_raw_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_raw_response_wrapper( + child_agents.update, + ) + self.delete = async_to_raw_response_wrapper( + child_agents.delete, + ) + self.add = async_to_raw_response_wrapper( + child_agents.add, + ) + self.view = async_to_raw_response_wrapper( + child_agents.view, + ) + + +class ChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_streamed_response_wrapper( + child_agents.update, + ) + self.delete = to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = to_streamed_response_wrapper( + child_agents.add, + ) + self.view = to_streamed_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_streamed_response_wrapper( + child_agents.update, + ) + self.delete = async_to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = async_to_streamed_response_wrapper( + child_agents.add, + ) + self.view = async_to_streamed_response_wrapper( + child_agents.view, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/functions.py b/src/digitalocean_genai_sdk/resources/genai/agents/functions.py new file mode 100644 index 00000000..cf18aad5 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/functions.py @@ -0,0 +1,421 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
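Before the function-route resource continues below, a usage sketch for the API-key and child-agent resources defined above. This is illustrative only: the client class name `DigitalOceanGenaiSDK`, environment-based credentials, and the `client.genai.agents` mounting are assumptions inferred from the package layout, and every UUID is a placeholder.

```python
# Hypothetical usage sketch -- client name, auth, and resource paths are
# assumptions inferred from the module layout, not confirmed by this patch.
from digitalocean_genai_sdk import DigitalOceanGenaiSDK

client = DigitalOceanGenaiSDK()  # assumed to read credentials from the environment

# Regenerate an agent API key (PUT .../api_keys/{api_key_uuid}/regenerate).
key = client.genai.agents.api_keys.regenerate(
    "api-key-uuid",           # placeholder api_key_uuid
    agent_uuid="agent-uuid",  # placeholder agent_uuid
)

# Add a child agent as a route on a parent agent, then list the parent's routes.
client.genai.agents.child_agents.add(
    "child-agent-uuid",                    # placeholder path_child_agent_uuid
    path_parent_agent_uuid="parent-uuid",  # placeholder
    route_name="billing-questions",
)
routes = client.genai.agents.child_agents.view("parent-uuid")
```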
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.agents import function_create_params, function_update_params +from ....types.genai.agents.function_create_response import FunctionCreateResponse +from ....types.genai.agents.function_delete_response import FunctionDeleteResponse +from ....types.genai.agents.function_update_response import FunctionUpdateResponse + +__all__ = ["FunctionsResource", "AsyncFunctionsResource"] + + +class FunctionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return FunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return FunctionsResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/genai/agents/{path_agent_uuid}/functions", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return self._put( + f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return self._delete( + f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + + +class AsyncFunctionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncFunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncFunctionsResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/genai/agents/{path_agent_uuid}/functions", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + async def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return await self._put( + f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + async def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return await self._delete( + f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + + +class FunctionsResourceWithRawResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_raw_response_wrapper( + functions.create, + ) + self.update = to_raw_response_wrapper( + functions.update, + ) + self.delete = to_raw_response_wrapper( + functions.delete, + ) + + +class AsyncFunctionsResourceWithRawResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_raw_response_wrapper( + functions.create, + ) + self.update = async_to_raw_response_wrapper( + functions.update, + ) + self.delete = async_to_raw_response_wrapper( + functions.delete, + ) + + +class FunctionsResourceWithStreamingResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_streamed_response_wrapper( + functions.create, + ) + self.update = to_streamed_response_wrapper( + functions.update, + ) + self.delete = to_streamed_response_wrapper( + functions.delete, + ) + + +class AsyncFunctionsResourceWithStreamingResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_streamed_response_wrapper( + functions.create, + ) + self.update = async_to_streamed_response_wrapper( + functions.update, + ) + self.delete = async_to_streamed_response_wrapper( + functions.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py new file mode 100644 index 00000000..ed9fed4f --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py @@ -0,0 +1,346 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
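The function-route lifecycle above maps onto three calls. A minimal sketch under the same assumptions as the earlier example (client setup and resource mounting); the FaaS identifiers and schemas are placeholders.

```python
# Create, update, and delete a function route on an agent (placeholder values).
fn = client.genai.agents.functions.create(
    "agent-uuid",                      # placeholder path_agent_uuid
    function_name="lookup-order",
    faas_name="default/lookup-order",  # placeholder FaaS function name
    faas_namespace="fn-namespace",     # placeholder FaaS namespace
    input_schema={"type": "object"},
    output_schema={"type": "object"},
)

client.genai.agents.functions.update(
    "function-uuid",              # placeholder path_function_uuid
    path_agent_uuid="agent-uuid",
    description="Looks up an order by ID",
)

client.genai.agents.functions.delete(
    "function-uuid",
    agent_uuid="agent-uuid",
)
```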
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ....types.genai.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return KnowledgeBasesResourceWithStreamingResponse(self) + + def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._post( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._delete( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + + +class AsyncKnowledgeBasesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncKnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncKnowledgeBasesResourceWithStreamingResponse(self) + + async def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._post( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + async def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + async def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._delete( + f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + + +class KnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = to_raw_response_wrapper( + knowledge_bases.attach, + ) + self.attach_single = to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_raw_response_wrapper( + knowledge_bases.detach, + ) + + +class AsyncKnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_raw_response_wrapper( + knowledge_bases.attach, + ) + self.attach_single = async_to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_raw_response_wrapper( + knowledge_bases.detach, + ) + + +class KnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = to_streamed_response_wrapper( + knowledge_bases.attach, + ) + self.attach_single = to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_streamed_response_wrapper( + knowledge_bases.detach, + ) + + +class AsyncKnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_streamed_response_wrapper( + knowledge_bases.attach, + ) + self.attach_single = async_to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_streamed_response_wrapper( + knowledge_bases.detach, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/versions.py b/src/digitalocean_genai_sdk/resources/genai/agents/versions.py new file mode 100644 index 00000000..5c0b6826 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/agents/versions.py @@ -0,0 +1,298 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
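Note that the knowledge-base resource above distinguishes the collection endpoint (`attach`, which POSTs to `.../knowledge_bases` with no body parameters) from the single-base calls (`attach_single` and `detach`, which address one knowledge base by UUID). A short sketch of the single-base flow, under the same assumed client setup as the earlier examples:

```python
# Attach a single knowledge base to an agent, then detach it (placeholder UUIDs).
client.genai.agents.knowledge_bases.attach_single(
    "kb-uuid",
    agent_uuid="agent-uuid",
)

detached = client.genai.agents.knowledge_bases.detach(
    "kb-uuid",
    agent_uuid="agent-uuid",
)
```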
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.agents import version_list_params, version_update_params +from ....types.genai.agents.version_list_response import VersionListResponse +from ....types.genai.agents.version_update_response import VersionUpdateResponse + +__all__ = ["VersionsResource", "AsyncVersionsResource"] + + +class VersionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> VersionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return VersionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return VersionsResourceWithStreamingResponse(self) + + def update( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + version_hash: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VersionUpdateResponse: + """ + To update to a specific agent version, send a PUT request to + `/v2/gen-ai/agents/{uuid}/versions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_uuid}/versions", + body=maybe_transform( + { + "body_uuid": body_uuid, + "version_hash": version_hash, + }, + version_update_params.VersionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionUpdateResponse, + ) + + def list( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VersionListResponse: + """ + To list all agent versions, send a GET request to + `/v2/gen-ai/agents/{uuid}/versions`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{uuid}/versions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + version_list_params.VersionListParams, + ), + ), + cast_to=VersionListResponse, + ) + + +class AsyncVersionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncVersionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncVersionsResourceWithStreamingResponse(self) + + async def update( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + version_hash: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VersionUpdateResponse: + """ + To update to a specific agent version, send a PUT request to + `/v2/gen-ai/agents/{uuid}/versions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_uuid}/versions", + body=await async_maybe_transform( + { + "body_uuid": body_uuid, + "version_hash": version_hash, + }, + version_update_params.VersionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VersionUpdateResponse, + ) + + async def list( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VersionListResponse: + """ + To list all agent versions, send a GET request to + `/v2/gen-ai/agents/{uuid}/versions`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}/versions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + version_list_params.VersionListParams, + ), + ), + cast_to=VersionListResponse, + ) + + +class VersionsResourceWithRawResponse: + def __init__(self, versions: VersionsResource) -> None: + self._versions = versions + + self.update = to_raw_response_wrapper( + versions.update, + ) + self.list = to_raw_response_wrapper( + versions.list, + ) + + +class AsyncVersionsResourceWithRawResponse: + def __init__(self, versions: AsyncVersionsResource) -> None: + self._versions = versions + + self.update = async_to_raw_response_wrapper( + versions.update, + ) + self.list = async_to_raw_response_wrapper( + versions.list, + ) + + +class VersionsResourceWithStreamingResponse: + def __init__(self, versions: VersionsResource) -> None: + self._versions = versions + + self.update = to_streamed_response_wrapper( + versions.update, + ) + self.list = to_streamed_response_wrapper( + versions.list, + ) + + +class AsyncVersionsResourceWithStreamingResponse: + def __init__(self, versions: AsyncVersionsResource) -> None: + self._versions = versions + + self.update = async_to_streamed_response_wrapper( + versions.update, + ) + self.list = async_to_streamed_response_wrapper( + versions.list, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/__init__.py b/src/digitalocean_genai_sdk/resources/genai/auth/__init__.py new file mode 100644 index 00000000..7c844a98 --- /dev/null +++ 
b/src/digitalocean_genai_sdk/resources/genai/auth/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py b/src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py new file mode 100644 index 00000000..2972198f --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "TokenResource", + "AsyncTokenResource", + "TokenResourceWithRawResponse", + "AsyncTokenResourceWithRawResponse", + "TokenResourceWithStreamingResponse", + "AsyncTokenResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py b/src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py new file mode 100644 index 00000000..7a7520fe --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AgentsResource", "AsyncAgentsResource"] + + +class AgentsResource(SyncAPIResource): + @cached_property + def token(self) -> TokenResource: + return TokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AgentsResourceWithStreamingResponse(self) + + +class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def token(self) -> AsyncTokenResource: + return AsyncTokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAgentsResourceWithStreamingResponse(self) + + +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithRawResponse: + return TokenResourceWithRawResponse(self._agents.token) + + +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithRawResponse: + return AsyncTokenResourceWithRawResponse(self._agents.token) + + +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithStreamingResponse: + return TokenResourceWithStreamingResponse(self._agents.token) + + +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithStreamingResponse: + return AsyncTokenResourceWithStreamingResponse(self._agents.token) diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py b/src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py new file mode 100644 index 00000000..491258af --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py @@ -0,0 +1,173 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
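+# Example (illustrative sketch only, not generated code): issuing a per-agent
+# access token with the resource defined below. The client class name
+# `DigitaloceanGenaiSDK`, the `api_key` argument, and the `client.genai.auth.agents.token`
+# accessor path are assumptions based on this package's `_client.py` and
+# resource layout; substitute the actual exports if they differ.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="...")
+#     response = client.genai.auth.agents.token.create(
+#         path_agent_uuid="agent-uuid",
+#     )
+#     print(response)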
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.genai.auth.agents import token_create_params
+from .....types.genai.auth.agents.token_create_response import TokenCreateResponse
+
+__all__ = ["TokenResource", "AsyncTokenResource"]
+
+
+class TokenResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> TokenResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return TokenResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> TokenResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return TokenResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        path_agent_uuid: str,
+        *,
+        body_agent_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TokenCreateResponse:
+        """
+        To issue an agent token, send a POST request to
+        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+        return self._post(
+            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
+            body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=TokenCreateResponse,
+        )
+
+
+class AsyncTokenResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncTokenResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncTokenResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return AsyncTokenResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        path_agent_uuid: str,
+        *,
+        body_agent_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> TokenCreateResponse:
+        """
+        To issue an agent token, send a POST request to
+        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_agent_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
+        return await self._post(
+            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
+            body=await async_maybe_transform(
+                {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=TokenCreateResponse,
+        )
+
+
+class TokenResourceWithRawResponse:
+    def __init__(self, token: TokenResource) -> None:
+        self._token = token
+
+        self.create = to_raw_response_wrapper(
+            token.create,
+        )
+
+
+class AsyncTokenResourceWithRawResponse:
+    def __init__(self, token: AsyncTokenResource) -> None:
+        self._token = token
+
+        self.create = async_to_raw_response_wrapper(
+            token.create,
+        )
+
+
+class TokenResourceWithStreamingResponse:
+    def __init__(self, token: TokenResource) -> None:
+        self._token = token
+
+        self.create = to_streamed_response_wrapper(
+            token.create,
+        )
+
+
+class AsyncTokenResourceWithStreamingResponse:
+    def __init__(self, token: AsyncTokenResource) -> None:
+        self._token = token
+
+        self.create = async_to_streamed_response_wrapper(
+            token.create,
+        )
diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/auth.py b/src/digitalocean_genai_sdk/resources/genai/auth/auth.py
new file mode 100644
index 00000000..5de06f71
--- /dev/null
+++ b/src/digitalocean_genai_sdk/resources/genai/auth/auth.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from .agents.agents import (
+    AgentsResource,
+    AsyncAgentsResource,
+    AgentsResourceWithRawResponse,
+    AsyncAgentsResourceWithRawResponse,
+    AgentsResourceWithStreamingResponse,
+    AsyncAgentsResourceWithStreamingResponse,
+)
+
+__all__ = ["AuthResource", "AsyncAuthResource"]
+
+
+class AuthResource(SyncAPIResource):
+    @cached_property
+    def agents(self) -> AgentsResource:
+        return AgentsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AuthResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AuthResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AuthResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AuthResourceWithStreamingResponse(self) + + +class AsyncAuthResource(AsyncAPIResource): + @cached_property + def agents(self) -> AsyncAgentsResource: + return AsyncAgentsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAuthResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAuthResourceWithStreamingResponse(self) + + +class AuthResourceWithRawResponse: + def __init__(self, auth: AuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AgentsResourceWithRawResponse: + return AgentsResourceWithRawResponse(self._auth.agents) + + +class AsyncAuthResourceWithRawResponse: + def __init__(self, auth: AsyncAuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AsyncAgentsResourceWithRawResponse: + return AsyncAgentsResourceWithRawResponse(self._auth.agents) + + +class AuthResourceWithStreamingResponse: + def __init__(self, auth: AuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AgentsResourceWithStreamingResponse: + return AgentsResourceWithStreamingResponse(self._auth.agents) + + +class AsyncAuthResourceWithStreamingResponse: + def __init__(self, auth: AsyncAuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AsyncAgentsResourceWithStreamingResponse: + return AsyncAgentsResourceWithStreamingResponse(self._auth.agents) diff --git a/src/digitalocean_genai_sdk/resources/genai/genai.py b/src/digitalocean_genai_sdk/resources/genai/genai.py new file mode 100644 index 00000000..12a38e42 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/genai.py @@ -0,0 +1,383 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
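+# Example (illustrative sketch only, not generated code): listing datacenter
+# regions through the `retrieve_regions` method defined below. The client class
+# name `DigitaloceanGenaiSDK`, the `api_key` argument, and the `client.genai`
+# accessor are assumptions based on this package's `_client.py` and resource
+# layout; substitute the actual exports if they differ.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="...")
+#     regions = client.genai.retrieve_regions(serves_inference=True)
+#     print(regions)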
+ +from __future__ import annotations + +import httpx + +from ...types import genai_retrieve_regions_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from .auth.auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .agents.agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) +from .models.models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from .providers.providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) +from .knowledge_bases.knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) +from ...types.genai_retrieve_regions_response import GenaiRetrieveRegionsResponse + +__all__ = ["GenaiResource", "AsyncGenaiResource"] + + +class GenaiResource(SyncAPIResource): + @cached_property + def agents(self) -> AgentsResource: + return AgentsResource(self._client) + + @cached_property + def providers(self) -> ProvidersResource: + return ProvidersResource(self._client) + + @cached_property + def auth(self) -> AuthResource: + return AuthResource(self._client) + + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + return IndexingJobsResource(self._client) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + return KnowledgeBasesResource(self._client) + + @cached_property + def models(self) -> ModelsResource: + return ModelsResource(self._client) + + @cached_property + def with_raw_response(self) -> GenaiResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return GenaiResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GenaiResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return GenaiResourceWithStreamingResponse(self)
+
+    def retrieve_regions(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GenaiRetrieveRegionsResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: include datacenters that are capable of running batch jobs.
+
+          serves_inference: include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    genai_retrieve_regions_params.GenaiRetrieveRegionsParams,
+                ),
+            ),
+            cast_to=GenaiRetrieveRegionsResponse,
+        )
+
+
+class AsyncGenaiResource(AsyncAPIResource):
+    @cached_property
+    def agents(self) -> AsyncAgentsResource:
+        return AsyncAgentsResource(self._client)
+
+    @cached_property
+    def providers(self) -> AsyncProvidersResource:
+        return AsyncProvidersResource(self._client)
+
+    @cached_property
+    def auth(self) -> AsyncAuthResource:
+        return AsyncAuthResource(self._client)
+
+    @cached_property
+    def indexing_jobs(self) -> AsyncIndexingJobsResource:
+        return AsyncIndexingJobsResource(self._client)
+
+    @cached_property
+    def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
+        return AsyncKnowledgeBasesResource(self._client)
+
+    @cached_property
+    def models(self) -> AsyncModelsResource:
+        return AsyncModelsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncGenaiResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncGenaiResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncGenaiResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return AsyncGenaiResourceWithStreamingResponse(self)
+
+    async def retrieve_regions(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GenaiRetrieveRegionsResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: include datacenters that are capable of running batch jobs.
+
+          serves_inference: include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    genai_retrieve_regions_params.GenaiRetrieveRegionsParams,
+                ),
+            ),
+            cast_to=GenaiRetrieveRegionsResponse,
+        )
+
+
+class GenaiResourceWithRawResponse:
+    def __init__(self, genai: GenaiResource) -> None:
+        self._genai = genai
+
+        self.retrieve_regions = to_raw_response_wrapper(
+            genai.retrieve_regions,
+        )
+
+    @cached_property
+    def agents(self) -> AgentsResourceWithRawResponse:
+        return AgentsResourceWithRawResponse(self._genai.agents)
+
+    @cached_property
+    def providers(self) -> ProvidersResourceWithRawResponse:
+        return ProvidersResourceWithRawResponse(self._genai.providers)
+
+    @cached_property
+    def auth(self) -> AuthResourceWithRawResponse:
+        return AuthResourceWithRawResponse(self._genai.auth)
+
+    @cached_property
+    def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse:
+        return IndexingJobsResourceWithRawResponse(self._genai.indexing_jobs)
+
+    @cached_property
+    def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse:
+        return KnowledgeBasesResourceWithRawResponse(self._genai.knowledge_bases)
+
+    @cached_property
+    def models(self) -> ModelsResourceWithRawResponse:
+        return ModelsResourceWithRawResponse(self._genai.models)
+
+
+class AsyncGenaiResourceWithRawResponse:
+    def __init__(self, genai: AsyncGenaiResource) -> None:
+        self._genai = genai
+
+        self.retrieve_regions = async_to_raw_response_wrapper(
+            genai.retrieve_regions,
+        )
+
+    @cached_property
+    def agents(self) -> AsyncAgentsResourceWithRawResponse:
+        return AsyncAgentsResourceWithRawResponse(self._genai.agents)
+
+    @cached_property
+    def providers(self) -> AsyncProvidersResourceWithRawResponse:
+        return AsyncProvidersResourceWithRawResponse(self._genai.providers)
+
+    @cached_property
+    def auth(self) -> AsyncAuthResourceWithRawResponse:
+        return AsyncAuthResourceWithRawResponse(self._genai.auth)
+
+    @cached_property
+    def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse:
+        return AsyncIndexingJobsResourceWithRawResponse(self._genai.indexing_jobs)
+
+    @cached_property
+    def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
+        return AsyncKnowledgeBasesResourceWithRawResponse(self._genai.knowledge_bases)
+
+    @cached_property
+    def models(self) -> AsyncModelsResourceWithRawResponse:
+        return AsyncModelsResourceWithRawResponse(self._genai.models)
+
+
+class GenaiResourceWithStreamingResponse:
+    def __init__(self, genai: GenaiResource) -> None:
+        self._genai = genai
+
+        self.retrieve_regions = to_streamed_response_wrapper(
+            genai.retrieve_regions,
+        )
+
+    @cached_property
+    def
agents(self) -> AgentsResourceWithStreamingResponse: + return AgentsResourceWithStreamingResponse(self._genai.agents) + + @cached_property + def providers(self) -> ProvidersResourceWithStreamingResponse: + return ProvidersResourceWithStreamingResponse(self._genai.providers) + + @cached_property + def auth(self) -> AuthResourceWithStreamingResponse: + return AuthResourceWithStreamingResponse(self._genai.auth) + + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: + return IndexingJobsResourceWithStreamingResponse(self._genai.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: + return KnowledgeBasesResourceWithStreamingResponse(self._genai.knowledge_bases) + + @cached_property + def models(self) -> ModelsResourceWithStreamingResponse: + return ModelsResourceWithStreamingResponse(self._genai.models) + + +class AsyncGenaiResourceWithStreamingResponse: + def __init__(self, genai: AsyncGenaiResource) -> None: + self._genai = genai + + self.retrieve_regions = async_to_streamed_response_wrapper( + genai.retrieve_regions, + ) + + @cached_property + def agents(self) -> AsyncAgentsResourceWithStreamingResponse: + return AsyncAgentsResourceWithStreamingResponse(self._genai.agents) + + @cached_property + def providers(self) -> AsyncProvidersResourceWithStreamingResponse: + return AsyncProvidersResourceWithStreamingResponse(self._genai.providers) + + @cached_property + def auth(self) -> AsyncAuthResourceWithStreamingResponse: + return AsyncAuthResourceWithStreamingResponse(self._genai.auth) + + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + return AsyncIndexingJobsResourceWithStreamingResponse(self._genai.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._genai.knowledge_bases) + + @cached_property + def models(self) -> AsyncModelsResourceWithStreamingResponse: + return AsyncModelsResourceWithStreamingResponse(self._genai.models) diff --git a/src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py b/src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py new file mode 100644 index 00000000..7c9366ad --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py @@ -0,0 +1,543 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
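+# Example (illustrative sketch only, not generated code): starting an indexing
+# job and polling its status with the resource defined below. The client class
+# name `DigitaloceanGenaiSDK`, the `api_key` argument, and the
+# `client.genai.indexing_jobs` accessor are assumptions based on this package's
+# `_client.py` and resource layout; substitute the actual exports if they differ.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="...")
+#     job = client.genai.indexing_jobs.create(
+#         knowledge_base_uuid="kb-uuid",
+#         data_source_uuids=["ds-uuid"],
+#     )
+#     status = client.genai.indexing_jobs.retrieve("job-uuid")
+#     print(status)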
+ +from __future__ import annotations + +from typing import List + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.genai import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from ..._base_client import make_request_options +from ...types.genai.indexing_job_list_response import IndexingJobListResponse +from ...types.genai.indexing_job_create_response import IndexingJobCreateResponse +from ...types.genai.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ...types.genai.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ...types.genai.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse + +__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] + + +class IndexingJobsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return IndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return IndexingJobsResourceWithStreamingResponse(self) + + def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobCreateResponse: + """ + To start an indexing job for a knowledge base, send a POST request to + `/v2/gen-ai/indexing_jobs`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/genai/indexing_jobs", + body=maybe_transform( + { + "data_source_uuids": data_source_uuids, + "knowledge_base_uuid": knowledge_base_uuid, + }, + indexing_job_create_params.IndexingJobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobCreateResponse, + ) + + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobRetrieveResponse:
+        """
+        To get the status of an indexing job for a knowledge base, send a GET request to
+        `/v2/gen-ai/indexing_jobs/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._get(
+            f"/v2/genai/indexing_jobs/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=IndexingJobRetrieveResponse,
+        )
+
+    def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobListResponse:
+        """
+        To list all indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/indexing_jobs`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/genai/indexing_jobs",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    indexing_job_list_params.IndexingJobListParams,
+                ),
+            ),
+            cast_to=IndexingJobListResponse,
+        )
+
+    def retrieve_data_sources(
+        self,
+        indexing_job_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobRetrieveDataSourcesResponse:
+        """
+        To list all data sources for an indexing job, send a GET request to
+        `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return self._get( + f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveDataSourcesResponse, + ) + + def update_cancel( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobUpdateCancelResponse: + """ + To cancel an indexing job for a knowledge base, send a PUT request to + `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. + + Args: + body_uuid: A unique identifier for an indexing job. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/genai/indexing_jobs/{path_uuid}/cancel", + body=maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class AsyncIndexingJobsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncIndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncIndexingJobsResourceWithStreamingResponse(self) + + async def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobCreateResponse:
+        """
+        To start an indexing job for a knowledge base, send a POST request to
+        `/v2/gen-ai/indexing_jobs`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v2/genai/indexing_jobs",
+            body=await async_maybe_transform(
+                {
+                    "data_source_uuids": data_source_uuids,
+                    "knowledge_base_uuid": knowledge_base_uuid,
+                },
+                indexing_job_create_params.IndexingJobCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=IndexingJobCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobRetrieveResponse:
+        """
+        To get the status of an indexing job for a knowledge base, send a GET request to
+        `/v2/gen-ai/indexing_jobs/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._get(
+            f"/v2/genai/indexing_jobs/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=IndexingJobRetrieveResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobListResponse:
+        """
+        To list all indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/indexing_jobs`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/genai/indexing_jobs",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    indexing_job_list_params.IndexingJobListParams,
+                ),
+            ),
+            cast_to=IndexingJobListResponse,
+        )
+
+    async def retrieve_data_sources(
+        self,
+        indexing_job_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobRetrieveDataSourcesResponse:
+        """
+        To list all data sources for an indexing job, send a GET request to
+        `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not indexing_job_uuid:
+            raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+        return await self._get(
+            f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=IndexingJobRetrieveDataSourcesResponse,
+        )
+
+    async def update_cancel(
+        self,
+        path_uuid: str,
+        *,
+        body_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> IndexingJobUpdateCancelResponse:
+        """
+        To cancel an indexing job for a knowledge base, send a PUT request to
+        `/v2/gen-ai/indexing_jobs/{uuid}/cancel`.
+
+        Args:
+          body_uuid: A unique identifier for an indexing job.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/genai/indexing_jobs/{path_uuid}/cancel", + body=await async_maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class IndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class IndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py new file mode 100644 index 00000000..03d143e2 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
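+# Example (illustrative sketch only, not generated code): listing the data
+# sources attached to a knowledge base via the resources re-exported from this
+# package. The client class name `DigitaloceanGenaiSDK`, the `api_key` argument,
+# and the `client.genai.knowledge_bases.data_sources` accessor are assumptions
+# based on this package's `_client.py` and resource layout; substitute the
+# actual exports if they differ.
+#
+#     from digitalocean_genai_sdk import DigitaloceanGenaiSDK
+#
+#     client = DigitaloceanGenaiSDK(api_key="...")
+#     sources = client.genai.knowledge_bases.data_sources.list(
+#         knowledge_base_uuid="kb-uuid",
+#         per_page=10,
+#     )
+#     print(sources)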
+ +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) + +__all__ = [ + "DataSourcesResource", + "AsyncDataSourcesResource", + "DataSourcesResourceWithRawResponse", + "AsyncDataSourcesResourceWithRawResponse", + "DataSourcesResourceWithStreamingResponse", + "AsyncDataSourcesResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py new file mode 100644 index 00000000..3a348b93 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py @@ -0,0 +1,410 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.knowledge_bases import ( + data_source_list_params, + data_source_create_params, +) +from ....types.genai.knowledge_bases.data_source_list_response import DataSourceListResponse +from ....types.genai.knowledge_bases.data_source_create_response import DataSourceCreateResponse +from ....types.genai.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse +from ....types.genai.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam +from ....types.genai.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] + + +class DataSourcesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return DataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return DataSourcesResourceWithStreamingResponse(self) + + def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._get( + f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + def delete( + self, + data_source_uuid: str, + *, + knowledge_base_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceDeleteResponse: + """ + To delete a data source from a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + if not data_source_uuid: + raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") + return self._delete( + f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceDeleteResponse, + ) + + +class AsyncDataSourcesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncDataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncDataSourcesResourceWithStreamingResponse(self) + + async def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=await async_maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + async def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._get( + f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + async def delete( + self, + data_source_uuid: str, + *, + knowledge_base_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceDeleteResponse: + """ + To delete a data source from a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + if not data_source_uuid: + raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") + return await self._delete( + f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceDeleteResponse, + ) + + +class DataSourcesResourceWithRawResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_raw_response_wrapper( + data_sources.create, + ) + self.list = to_raw_response_wrapper( + data_sources.list, + ) + self.delete = to_raw_response_wrapper( + data_sources.delete, + ) + + +class AsyncDataSourcesResourceWithRawResponse: + def __init__(self, data_sources: AsyncDataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = async_to_raw_response_wrapper( + data_sources.create, + ) + self.list = async_to_raw_response_wrapper( + data_sources.list, + ) + self.delete = async_to_raw_response_wrapper( + data_sources.delete, + ) + + +class DataSourcesResourceWithStreamingResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_streamed_response_wrapper( + data_sources.create, + ) + self.list = to_streamed_response_wrapper( + data_sources.list, + ) + self.delete = to_streamed_response_wrapper( + data_sources.delete, + ) + + +class AsyncDataSourcesResourceWithStreamingResponse: + def __init__(self, data_sources: AsyncDataSourcesResource) -> 
None: + self._data_sources = data_sources + + self.create = async_to_streamed_response_wrapper( + data_sources.create, + ) + self.list = async_to_streamed_response_wrapper( + data_sources.list, + ) + self.delete = async_to_streamed_response_wrapper( + data_sources.delete, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py new file mode 100644 index 00000000..138d6ef8 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py @@ -0,0 +1,667 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from ....types.genai import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params +from ...._base_client import make_request_options +from ....types.genai.knowledge_base_list_response import KnowledgeBaseListResponse +from ....types.genai.knowledge_base_create_response import KnowledgeBaseCreateResponse +from ....types.genai.knowledge_base_delete_response import KnowledgeBaseDeleteResponse +from ....types.genai.knowledge_base_update_response import KnowledgeBaseUpdateResponse +from ....types.genai.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def data_sources(self) -> DataSourcesResource: + return DataSourcesResource(self._client) + + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return KnowledgeBasesResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v2/genai/knowledge_bases",
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
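+
+        A minimal usage sketch, assuming the generated client exposes this
+        resource as `client.genai.knowledge_bases` (the UUID is illustrative):
+
+            knowledge_base = client.genai.knowledge_bases.retrieve(
+                "your-kb-uuid",
+            )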
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._get(
+            f"/v2/genai/knowledge_bases/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        body_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: The ID of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return self._put(
+            f"/v2/genai/knowledge_bases/{path_uuid}",
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
+    def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/genai/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + knowledge_base_list_params.KnowledgeBaseListParams, + ), + ), + cast_to=KnowledgeBaseListResponse, + ) + + def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDeleteResponse: + """ + To delete a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._delete( + f"/v2/genai/knowledge_bases/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDeleteResponse, + ) + + +class AsyncKnowledgeBasesResource(AsyncAPIResource): + @cached_property + def data_sources(self) -> AsyncDataSourcesResource: + return AsyncDataSourcesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncKnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncKnowledgeBasesResourceWithStreamingResponse(self) + + async def create( + self, + *, + database_id: str | NotGiven = NOT_GIVEN, + datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, + embedding_model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v2/genai/knowledge_bases",
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
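+
+        Async usage mirrors the sync sketch, e.g.
+        `await client.genai.knowledge_bases.retrieve("your-kb-uuid")`
+        (accessor path and UUID illustrative).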
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._get(
+            f"/v2/genai/knowledge_bases/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        body_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: The ID of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return await self._put(
+            f"/v2/genai/knowledge_bases/{path_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/genai/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + knowledge_base_list_params.KnowledgeBaseListParams, + ), + ), + cast_to=KnowledgeBaseListResponse, + ) + + async def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDeleteResponse: + """ + To delete a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/genai/knowledge_bases/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDeleteResponse, + ) + + +class KnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = to_raw_response_wrapper( + knowledge_bases.create, + ) + self.retrieve = to_raw_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = to_raw_response_wrapper( + knowledge_bases.update, + ) + self.list = to_raw_response_wrapper( + knowledge_bases.list, + ) + self.delete = to_raw_response_wrapper( + knowledge_bases.delete, + ) + + @cached_property + def data_sources(self) -> DataSourcesResourceWithRawResponse: + return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + + +class AsyncKnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = async_to_raw_response_wrapper( + knowledge_bases.create, + ) + self.retrieve = async_to_raw_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = async_to_raw_response_wrapper( + knowledge_bases.update, + ) + self.list = async_to_raw_response_wrapper( + knowledge_bases.list, + ) + self.delete = async_to_raw_response_wrapper( + knowledge_bases.delete, + ) + + @cached_property + def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: + return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + + +class KnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = to_streamed_response_wrapper( + knowledge_bases.create, + ) + self.retrieve = to_streamed_response_wrapper( + 
knowledge_bases.retrieve, + ) + self.update = to_streamed_response_wrapper( + knowledge_bases.update, + ) + self.list = to_streamed_response_wrapper( + knowledge_bases.list, + ) + self.delete = to_streamed_response_wrapper( + knowledge_bases.delete, + ) + + @cached_property + def data_sources(self) -> DataSourcesResourceWithStreamingResponse: + return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + + +class AsyncKnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = async_to_streamed_response_wrapper( + knowledge_bases.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + knowledge_bases.update, + ) + self.list = async_to_streamed_response_wrapper( + knowledge_bases.list, + ) + self.delete = async_to_streamed_response_wrapper( + knowledge_bases.delete, + ) + + @cached_property + def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: + return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) diff --git a/src/digitalocean_genai_sdk/resources/genai/models/__init__.py b/src/digitalocean_genai_sdk/resources/genai/models/__init__.py new file mode 100644 index 00000000..d7dd650c --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/models/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/models/api_keys.py b/src/digitalocean_genai_sdk/resources/genai/models/api_keys.py new file mode 100644 index 00000000..58a16be2 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/models/api_keys.py @@ -0,0 +1,529 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
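+#
+# A minimal lifecycle sketch for this resource, assuming the generated client
+# exposes it as `client.genai.models.api_keys` (names and UUIDs below are
+# illustrative, not from the spec):
+#
+#     key = client.genai.models.api_keys.create(name="example-key")
+#     client.genai.models.api_keys.list(page=1, per_page=20)
+#     client.genai.models.api_keys.update_regenerate("api-key-uuid")
+#     client.genai.models.api_keys.delete("api-key-uuid")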
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.genai.models import api_key_list_params, api_key_create_params, api_key_update_params +from ....types.genai.models.api_key_list_response import APIKeyListResponse +from ....types.genai.models.api_key_create_response import APIKeyCreateResponse +from ....types.genai.models.api_key_delete_response import APIKeyDeleteResponse +from ....types.genai.models.api_key_update_response import APIKeyUpdateResponse +from ....types.genai.models.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/genai/models/api_keys", + body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/genai/models/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/genai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/genai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. 
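+
+        A hedged async sketch (accessor path and key name are illustrative):
+
+            key = await client.genai.models.api_keys.create(name="example-key")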
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/genai/models/api_keys", + body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/genai/models/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/genai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/genai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + async def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.update = to_streamed_response_wrapper( + 
api_keys.update, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_streamed_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_streamed_response_wrapper( + api_keys.update_regenerate, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/models/models.py b/src/digitalocean_genai_sdk/resources/genai/models/models.py new file mode 100644 index 00000000..6273086b --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/models/models.py @@ -0,0 +1,282 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....types.genai import model_list_params +from ...._base_client import make_request_options +from ....types.genai.model_list_response import ModelListResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
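+
+        A hedged sketch of the pattern (accessor path assumed to be
+        `client.genai.models`):
+
+            with client.genai.models.with_streaming_response.list() as response:
+                ...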
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return ModelsResourceWithStreamingResponse(self)
+
+    def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        public_only: bool | NotGiven = NOT_GIVEN,
+        usecases: List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelListResponse:
+        """
+        To list all models, send a GET request to `/v2/gen-ai/models`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          public_only: only include models that are publicly available.
+
+          usecases: include only models defined for the listed use cases.
+
+              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+              - MODEL_USECASE_AGENT: The model may be used in an agent
+              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+                (embedding models)
+              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+              - MODEL_USECASE_REASONING: The model may be used for reasoning
+              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/genai/models",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    model_list_params.ModelListParams,
+                ),
+            ),
+            cast_to=ModelListResponse,
+        )
+
+
+class AsyncModelsResource(AsyncAPIResource):
+    @cached_property
+    def api_keys(self) -> AsyncAPIKeysResource:
+        return AsyncAPIKeysResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncModelsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return AsyncModelsResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        public_only: bool | NotGiven = NOT_GIVEN,
+        usecases: List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelListResponse:
+        """
+        To list all models, send a GET request to `/v2/gen-ai/models`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          public_only: only include models that are publicly available.
+
+          usecases: include only models defined for the listed use cases.
+
+              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+              - MODEL_USECASE_AGENT: The model may be used in an agent
+              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+                (embedding models)
+              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+              - MODEL_USECASE_REASONING: The model may be used for reasoning
+              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/genai/models",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    model_list_params.ModelListParams,
+                ),
+            ),
+            cast_to=ModelListResponse,
+        )
+
+
+class ModelsResourceWithRawResponse:
+    def __init__(self, models: ModelsResource) -> None:
+        self._models = models
+
+        self.list = to_raw_response_wrapper(
+            models.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> APIKeysResourceWithRawResponse:
+        return APIKeysResourceWithRawResponse(self._models.api_keys)
+
+
+class AsyncModelsResourceWithRawResponse:
+    def __init__(self, models: AsyncModelsResource) -> None:
+        self._models = models
+
+        self.list = async_to_raw_response_wrapper(
+            models.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
+        return AsyncAPIKeysResourceWithRawResponse(self._models.api_keys)
+
+
+class ModelsResourceWithStreamingResponse:
+    def __init__(self, models: ModelsResource) -> None:
+        self._models = models
+
+        self.list = to_streamed_response_wrapper(
+            models.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> APIKeysResourceWithStreamingResponse:
+        return APIKeysResourceWithStreamingResponse(self._models.api_keys)
+
+
+class AsyncModelsResourceWithStreamingResponse:
+    def __init__(self, models: AsyncModelsResource) -> None:
+ self._models = models + + self.list = async_to_streamed_response_wrapper( + models.list, + ) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._models.api_keys) diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/__init__.py b/src/digitalocean_genai_sdk/resources/genai/providers/__init__.py new file mode 100644 index 00000000..1731e057 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) + +__all__ = [ + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py new file mode 100644 index 00000000..057a3a2f --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
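Before moving into the provider resources, a quick orientation: the `list` method generated above drives the `/v2/genai/models` endpoint. A minimal usage sketch follows; the client class name, constructor arguments, and the `client.genai.models` accessor path are assumptions inferred from this patch's package layout, not confirmed by it.

```python
# Hypothetical usage of the generated models resource (names assumed).
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class name

client = DigitaloceanGenaiSDK(api_key="...")  # assumed constructor signature

# List publicly available models that can back an agent, one page at a time.
models_page = client.genai.models.list(
    page=1,
    per_page=25,
    public_only=True,
    usecases=["MODEL_USECASE_AGENT"],
)
```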
+ +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py new file mode 100644 index 00000000..c996e2cc --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AnthropicResource", "AsyncAnthropicResource"] + + +class AnthropicResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AnthropicResourceWithStreamingResponse(self) + + +class AsyncAnthropicResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncAnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncAnthropicResourceWithStreamingResponse(self) + + +class AnthropicResourceWithRawResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithRawResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._anthropic.keys) + + +class AnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py new file mode 100644 index 00000000..e9ee7130 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py @@ -0,0 +1,667 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.genai.providers.anthropic import ( + key_list_params, + key_create_params, + key_update_params, + key_list_agents_params, +) +from .....types.genai.providers.anthropic.key_list_response import KeyListResponse +from .....types.genai.providers.anthropic.key_create_response import KeyCreateResponse +from .....types.genai.providers.anthropic.key_delete_response import KeyDeleteResponse +from .....types.genai.providers.anthropic.key_update_response import KeyUpdateResponse +from .....types.genai.providers.anthropic.key_retrieve_response import KeyRetrieveResponse +from .....types.genai.providers.anthropic.key_list_agents_response import KeyListAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/genai/anthropic/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/genai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/genai/anthropic/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/genai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/genai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/genai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/genai/anthropic/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/genai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/genai/anthropic/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/genai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/genai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/genai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = to_raw_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_raw_response_wrapper( + keys.list_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = to_streamed_response_wrapper( + keys.list_agents, + ) + + 
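The Anthropic keys resource above covers the full key lifecycle. A sketch of how it might be driven follows, reusing the assumed client and accessor path from the earlier example; the UUID values are placeholders. Note that `update` deliberately splits the UUID into a positional path parameter (`path_api_key_uuid`) and an optional body field (`body_api_key_uuid`), mirroring the request schema.

```python
# Hypothetical Anthropic key lifecycle (client name and accessor path assumed).
keys = client.genai.providers.anthropic.keys

created = keys.create(api_key="sk-ant-...", name="staging-key")

# The first positional argument is the path UUID; the body UUID is optional.
keys.update("example-key-uuid", name="staging-key-renamed")

first_page = keys.list(page=1, per_page=10)
agents = keys.list_agents("example-key-uuid", page=1, per_page=10)
keys.delete("example-key-uuid")
```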
+class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_streamed_response_wrapper( + keys.list_agents, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py b/src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py new file mode 100644 index 00000000..66d8ca7a --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py b/src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py new file mode 100644 index 00000000..ae9a3a01 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py @@ -0,0 +1,663 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
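The `WithRawResponse` and `WithStreamingResponse` wrapper classes that close out each resource follow the standard Stainless pattern referenced in the docstrings above. A sketch of both access styles, under the same client assumptions as the earlier examples (the `.headers` and `.parse()` attribute names follow that convention and are assumptions here):

```python
# Raw-response access: returns the HTTP response wrapper; the parsed model
# is still available via .parse().
raw = client.genai.providers.anthropic.keys.with_raw_response.list()
print(raw.headers)  # inspect response headers
page = raw.parse()  # parsed KeyListResponse

# Streaming-response access: the body is not read eagerly, so use it as a
# context manager to ensure the connection is released.
with client.genai.providers.anthropic.keys.with_streaming_response.list() as response:
    print(response.headers)
```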
+ +from __future__ import annotations + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.genai.providers.openai import ( + key_list_params, + key_create_params, + key_update_params, + key_retrieve_agents_params, +) +from .....types.genai.providers.openai.key_list_response import KeyListResponse +from .....types.genai.providers.openai.key_create_response import KeyCreateResponse +from .....types.genai.providers.openai.key_delete_response import KeyDeleteResponse +from .....types.genai.providers.openai.key_update_response import KeyUpdateResponse +from .....types.genai.providers.openai.key_retrieve_response import KeyRetrieveResponse +from .....types.genai.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/genai/openai/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/genai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/genai/openai/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/genai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/genai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/genai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/genai/openai/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/genai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/genai/openai/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/genai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/genai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/genai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_streamed_response_wrapper( 
+ keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_streamed_response_wrapper( + keys.retrieve_agents, + ) diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py b/src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py new file mode 100644 index 00000000..a0c5a373 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] + + +class OpenAIResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> OpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return OpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return OpenAIResourceWithStreamingResponse(self) + + +class AsyncOpenAIResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncOpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncOpenAIResourceWithStreamingResponse(self) + + +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._openai.keys) + + +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/providers.py b/src/digitalocean_genai_sdk/resources/genai/providers/providers.py new file mode 100644 index 00000000..fa3262d7 --- /dev/null +++ b/src/digitalocean_genai_sdk/resources/genai/providers/providers.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from .openai.openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic.anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = ["ProvidersResource", "AsyncProvidersResource"] + + +class ProvidersResource(SyncAPIResource): + @cached_property + def anthropic(self) -> AnthropicResource: + return AnthropicResource(self._client) + + @cached_property + def openai(self) -> OpenAIResource: + return OpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> ProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return ProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return ProvidersResourceWithStreamingResponse(self) + + +class AsyncProvidersResource(AsyncAPIResource): + @cached_property + def anthropic(self) -> AsyncAnthropicResource: + return AsyncAnthropicResource(self._client) + + @cached_property + def openai(self) -> AsyncOpenAIResource: + return AsyncOpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + """ + return AsyncProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + """ + return AsyncProvidersResourceWithStreamingResponse(self) + + +class ProvidersResourceWithRawResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithRawResponse: + return AnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithRawResponse: + return OpenAIResourceWithRawResponse(self._providers.openai) + + +class AsyncProvidersResourceWithRawResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: + return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithRawResponse: + return AsyncOpenAIResourceWithRawResponse(self._providers.openai) + + +class ProvidersResourceWithStreamingResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithStreamingResponse: + return AnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithStreamingResponse: + return OpenAIResourceWithStreamingResponse(self._providers.openai) + + +class AsyncProvidersResourceWithStreamingResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: + return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: + return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py index 81b75441..0fbf131b 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/digitalocean_genai_sdk/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py index 144bfd42..342f0444 100644 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -5,6 +5,12 @@ from .model import Model as Model from .model_list_response import ModelListResponse as ModelListResponse from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams -from .stop_configuration_param import StopConfigurationParam as StopConfigurationParam from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse -from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams +from .genai_retrieve_regions_params import GenaiRetrieveRegionsParams as GenaiRetrieveRegionsParams +from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from .genai_retrieve_regions_response import GenaiRetrieveRegionsResponse as GenaiRetrieveRegionsResponse +from .chat_completion_request_message_content_part_text_param import ( + ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, +) diff --git a/src/digitalocean_genai_sdk/types/chat/__init__.py b/src/digitalocean_genai_sdk/types/chat/__init__.py deleted file mode 100644 index 7dbba0c2..00000000 --- a/src/digitalocean_genai_sdk/types/chat/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
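
The `.with_raw_response` and `.with_streaming_response` wrappers documented above follow the same pattern on every resource. A minimal usage sketch, assuming the package's client class is named `DigitaloceanGenaiSDK` and that the models resource exposes a `list()` method (neither is confirmed by this hunk):

```python
# Hedged sketch of the raw/streaming response wrappers; the client class
# name and the models.list() method are assumptions, not shown in this diff.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

# Default behaviour: the parsed content is returned directly.
models = client.models.list()

# Raw variant: headers are available and the body is parsed on demand.
raw = client.models.with_raw_response.list()
print(raw.headers.get("x-request-id"))
models = raw.parse()

# Streaming variant: the body is not read eagerly, so use the context
# manager to ensure the connection is released.
with client.models.with_streaming_response.list() as response:
    print(response.headers.get("content-type"))
```
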
- -from __future__ import annotations - -from .usage import Usage as Usage -from .token_logprob import TokenLogprob as TokenLogprob -from .create_response import CreateResponse as CreateResponse -from .response_message import ResponseMessage as ResponseMessage -from .completion_create_params import CompletionCreateParams as CompletionCreateParams -from .request_message_content_part_text_param import ( - RequestMessageContentPartTextParam as RequestMessageContentPartTextParam, -) diff --git a/src/digitalocean_genai_sdk/types/chat/response_message.py b/src/digitalocean_genai_sdk/types/chat/response_message.py deleted file mode 100644 index 22e81c9b..00000000 --- a/src/digitalocean_genai_sdk/types/chat/response_message.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseMessage"] - - -class ResponseMessage(BaseModel): - content: Optional[str] = None - """The contents of the message.""" - - refusal: Optional[str] = None - """The refusal message generated by the model.""" - - role: Literal["assistant"] - """The role of the author of this message.""" diff --git a/src/digitalocean_genai_sdk/types/chat/usage.py b/src/digitalocean_genai_sdk/types/chat/usage.py deleted file mode 100644 index a3785b9f..00000000 --- a/src/digitalocean_genai_sdk/types/chat/usage.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from ..._models import BaseModel - -__all__ = ["Usage"] - - -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" diff --git a/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py b/src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py similarity index 68% rename from src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py rename to src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py index 8e83e40b..4aec9488 100644 --- a/src/digitalocean_genai_sdk/types/chat/request_message_content_part_text_param.py +++ b/src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py @@ -4,10 +4,10 @@ from typing_extensions import Literal, Required, TypedDict -__all__ = ["RequestMessageContentPartTextParam"] +__all__ = ["ChatCompletionRequestMessageContentPartTextParam"] -class RequestMessageContentPartTextParam(TypedDict, total=False): +class ChatCompletionRequestMessageContentPartTextParam(TypedDict, total=False): text: Required[str] """The text content.""" diff --git a/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py b/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py deleted file mode 100644 index 471e0eba..00000000 --- a/src/digitalocean_genai_sdk/types/chat_completion_stream_options_param.py +++ /dev/null @@ -1,20 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
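
The deletions above fold the nested `types/chat/*` modules into top-level `ChatCompletion*` / `ChatCreateCompletion*` names. A short import sketch of the new locations, matching the re-exports added to `types/__init__.py` earlier in this patch (the commented lines show the removed paths for contrast):

```python
# Removed in this change:
#   from digitalocean_genai_sdk.types.chat import TokenLogprob, Usage
# New top-level names, re-exported from types/__init__.py:
from digitalocean_genai_sdk.types import (
    ChatCompletionRequestMessageContentPartTextParam,
    ChatCompletionTokenLogprob,
    ChatCreateCompletionParams,
    ChatCreateCompletionResponse,
)
```
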
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["ChatCompletionStreamOptionsParam"] - - -class ChatCompletionStreamOptionsParam(TypedDict, total=False): - include_usage: bool - """If set, an additional chunk will be streamed before the `data: [DONE]` message. - - The `usage` field on this chunk shows the token usage statistics for the entire - request, and the `choices` field will always be an empty array. - - All other chunks will also include a `usage` field, but with a null value. - **NOTE:** If the stream is interrupted, you may not receive the final usage - chunk which contains the total token usage for the request. - """ diff --git a/src/digitalocean_genai_sdk/types/chat/token_logprob.py b/src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py similarity index 92% rename from src/digitalocean_genai_sdk/types/chat/token_logprob.py rename to src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py index d31943f6..78de1dfa 100644 --- a/src/digitalocean_genai_sdk/types/chat/token_logprob.py +++ b/src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py @@ -2,9 +2,9 @@ from typing import List, Optional -from ..._models import BaseModel +from .._models import BaseModel -__all__ = ["TokenLogprob", "TopLogprob"] +__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] class TopLogprob(BaseModel): @@ -28,7 +28,7 @@ class TopLogprob(BaseModel): """ -class TokenLogprob(BaseModel): +class ChatCompletionTokenLogprob(BaseModel): token: str """The token.""" diff --git a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py b/src/digitalocean_genai_sdk/types/chat_create_completion_params.py similarity index 83% rename from src/digitalocean_genai_sdk/types/chat/completion_create_params.py rename to src/digitalocean_genai_sdk/types/chat_create_completion_params.py index fcbf22bb..05c427b1 100644 --- a/src/digitalocean_genai_sdk/types/chat/completion_create_params.py +++ b/src/digitalocean_genai_sdk/types/chat_create_completion_params.py @@ -2,15 +2,13 @@ from __future__ import annotations -from typing import Dict, Union, Iterable, Optional +from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..stop_configuration_param import StopConfigurationParam -from ..chat_completion_stream_options_param import ChatCompletionStreamOptionsParam -from .request_message_content_part_text_param import RequestMessageContentPartTextParam +from .chat_completion_request_message_content_part_text_param import ChatCompletionRequestMessageContentPartTextParam __all__ = [ - "CompletionCreateParams", + "ChatCreateCompletionParams", "Message", "MessageChatCompletionRequestSystemMessage", "MessageChatCompletionRequestDeveloperMessage", @@ -18,10 +16,11 @@ "MessageChatCompletionRequestAssistantMessage", "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal", + "StreamOptions", ] -class CompletionCreateParams(TypedDict, total=False): +class ChatCreateCompletionParams(TypedDict, total=False): messages: Required[Iterable[Message]] """A list of messages comprising the conversation so far.""" @@ -91,7 +90,7 @@ class CompletionCreateParams(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. 
""" - stop: Optional[StopConfigurationParam] + stop: Union[Optional[str], List[str], None] """Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. @@ -103,7 +102,7 @@ class CompletionCreateParams(TypedDict, total=False): generated using server-sent events. """ - stream_options: Optional[ChatCompletionStreamOptionsParam] + stream_options: Optional[StreamOptions] """Options for streaming response. Only set this when you set `stream: true`.""" temperature: Optional[float] @@ -138,7 +137,7 @@ class CompletionCreateParams(TypedDict, total=False): class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] + content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] """The contents of the system message.""" role: Required[Literal["system"]] @@ -146,7 +145,7 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] + content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] """The contents of the developer message.""" role: Required[Literal["developer"]] @@ -154,7 +153,7 @@ class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[RequestMessageContentPartTextParam]]] + content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] """The contents of the user message.""" role: Required[Literal["user"]] @@ -172,7 +171,7 @@ class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatC MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ - RequestMessageContentPartTextParam, + ChatCompletionRequestMessageContentPartTextParam, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal, ] @@ -194,3 +193,16 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, ] + + +class StreamOptions(TypedDict, total=False): + include_usage: bool + """If set, an additional chunk will be streamed before the `data: [DONE]` message. + + The `usage` field on this chunk shows the token usage statistics for the entire + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. 
+ """ diff --git a/src/digitalocean_genai_sdk/types/chat/create_response.py b/src/digitalocean_genai_sdk/types/chat_create_completion_response.py similarity index 60% rename from src/digitalocean_genai_sdk/types/chat/create_response.py rename to src/digitalocean_genai_sdk/types/chat_create_completion_response.py index c80c56ac..e1f20038 100644 --- a/src/digitalocean_genai_sdk/types/chat/create_response.py +++ b/src/digitalocean_genai_sdk/types/chat_create_completion_response.py @@ -3,22 +3,31 @@ from typing import List, Optional from typing_extensions import Literal -from .usage import Usage -from ..._models import BaseModel -from .token_logprob import TokenLogprob -from .response_message import ResponseMessage +from .._models import BaseModel +from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["CreateResponse", "Choice", "ChoiceLogprobs"] +__all__ = ["ChatCreateCompletionResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] class ChoiceLogprobs(BaseModel): - content: Optional[List[TokenLogprob]] = None + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" - refusal: Optional[List[TokenLogprob]] = None + refusal: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message refusal tokens with log probability information.""" +class ChoiceMessage(BaseModel): + content: Optional[str] = None + """The contents of the message.""" + + refusal: Optional[str] = None + """The refusal message generated by the model.""" + + role: Literal["assistant"] + """The role of the author of this message.""" + + class Choice(BaseModel): finish_reason: Literal["stop", "length"] """The reason the model stopped generating tokens. @@ -34,11 +43,22 @@ class Choice(BaseModel): logprobs: Optional[ChoiceLogprobs] = None """Log probability information for the choice.""" - message: ResponseMessage + message: ChoiceMessage """A chat completion message generated by the model.""" -class CreateResponse(BaseModel): +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + +class ChatCreateCompletionResponse(BaseModel): id: str """A unique identifier for the chat completion.""" diff --git a/src/digitalocean_genai_sdk/types/genai/__init__.py b/src/digitalocean_genai_sdk/types/genai/__init__.py new file mode 100644 index 00000000..486ef2b0 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/__init__.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
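
`ChatCreateCompletionParams` and `ChatCreateCompletionResponse` above together define the chat-completions request and response shapes. A hedged sketch of a request body restricted to fields visible in this hunk; a model identifier is presumably also required by the endpoint but is not shown here, so it is omitted:

```python
from digitalocean_genai_sdk.types import ChatCreateCompletionParams

# Only fields documented in this hunk are used; any model-identifier field
# required by the endpoint is not shown here and is deliberately omitted.
params: ChatCreateCompletionParams = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    "stop": ["\n\n"],  # up to 4 stop sequences
    "stream": True,
    "stream_options": {"include_usage": True},  # final chunk reports usage
}
```

On the response side, each `Choice` pairs a `ChoiceMessage` with optional `ChoiceLogprobs`, and `Usage` totals prompt plus completion tokens.
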
+ +from __future__ import annotations + +from .api_agent import APIAgent as APIAgent +from .api_model import APIModel as APIModel +from .api_agreement import APIAgreement as APIAgreement +from .api_indexing_job import APIIndexingJob as APIIndexingJob +from .agent_list_params import AgentListParams as AgentListParams +from .api_model_version import APIModelVersion as APIModelVersion +from .model_list_params import ModelListParams as ModelListParams +from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .agent_create_params import AgentCreateParams as AgentCreateParams +from .agent_list_response import AgentListResponse as AgentListResponse +from .agent_update_params import AgentUpdateParams as AgentUpdateParams +from .model_list_response import ModelListResponse as ModelListResponse +from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse +from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams +from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility +from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse +from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse +from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams +from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse +from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse +from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse +from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse +from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse +from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .indexing_job_retrieve_data_sources_response import ( + IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_params.py b/src/digitalocean_genai_sdk/types/genai/agent_create_params.py new file mode 100644 index 00000000..ad801f17 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_create_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["AgentCreateParams"] + + +class AgentCreateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + knowledge_base_uuid: List[str] + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + region: str + + tags: List[str] diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py new file mode 100644 index 00000000..fc987cf2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel + +__all__ = ["AgentCreateResponse"] + + +class AgentCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent + +if PYDANTIC_V2: + AgentCreateResponse.model_rebuild() +else: + AgentCreateResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py new file mode 100644 index 00000000..aa2cb139 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel + +__all__ = ["AgentDeleteResponse"] + + +class AgentDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent + +if PYDANTIC_V2: + AgentDeleteResponse.model_rebuild() +else: + AgentDeleteResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agent_list_params.py b/src/digitalocean_genai_sdk/types/genai/agent_list_params.py new file mode 100644 index 00000000..e13a10c9 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AgentListParams"] + + +class AgentListParams(TypedDict, total=False): + only_deployed: bool + """only list agents that are deployed.""" + + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/agent_list_response.py b/src/digitalocean_genai_sdk/types/genai/agent_list_response.py new file mode 100644 index 00000000..4a7f5f89 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_list_response.py @@ -0,0 +1,198 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
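
The agent types above share one convention: request bodies are `TypedDict`s whose `PropertyInfo` annotations rename fields on the wire, while responses wrap an optional `APIAgent`. A sketch of an `AgentCreateParams` body; every uuid, the project id, and the region are placeholders:

```python
from digitalocean_genai_sdk.types.genai import AgentCreateParams

params: AgentCreateParams = {
    "name": "support-agent",
    "instruction": "Answer support questions using the attached knowledge base.",
    "model_uuid": "11111111-1111-1111-1111-111111111111",  # foundation model
    "knowledge_base_uuid": ["22222222-2222-2222-2222-222222222222"],
    "project_id": "33333333-3333-3333-3333-333333333333",
    "region": "tor1",  # placeholder region slug
    "tags": ["support", "demo"],
    # Serialized as "open_ai_key_uuid" on the wire via the alias above:
    "openai_key_uuid": "44444444-4444-4444-4444-444444444444",
}
```
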
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from .api_model import APIModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = [ + "AgentListResponse", + "Agent", + "AgentChatbot", + "AgentChatbotIdentifier", + "AgentDeployment", + "AgentTemplate", + "AgentTemplateGuardrail", +] + + +class AgentChatbot(BaseModel): + button_background_color: Optional[str] = None + + logo: Optional[str] = None + + name: Optional[str] = None + + primary_color: Optional[str] = None + + secondary_color: Optional[str] = None + + starting_message: Optional[str] = None + + +class AgentChatbotIdentifier(BaseModel): + agent_chatbot_identifier: Optional[str] = None + + +class AgentDeployment(BaseModel): + created_at: Optional[datetime] = None + + name: Optional[str] = None + + status: Optional[ + Literal[ + "STATUS_UNKNOWN", + "STATUS_WAITING_FOR_DEPLOYMENT", + "STATUS_DEPLOYING", + "STATUS_RUNNING", + "STATUS_FAILED", + "STATUS_WAITING_FOR_UNDEPLOYMENT", + "STATUS_UNDEPLOYING", + "STATUS_UNDEPLOYMENT_FAILED", + "STATUS_DELETED", + ] + ] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + visibility: Optional[APIDeploymentVisibility] = None + + +class AgentTemplateGuardrail(BaseModel): + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class AgentTemplate(BaseModel): + created_at: Optional[datetime] = None + + description: Optional[str] = None + + guardrails: Optional[List[AgentTemplateGuardrail]] = None + + instruction: Optional[str] = None + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + long_description: Optional[str] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + short_description: Optional[str] = None + + summary: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class Agent(BaseModel): + chatbot: Optional[AgentChatbot] = None + + chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None + + created_at: Optional[datetime] = None + + deployment: Optional[AgentDeployment] = None + + description: Optional[str] = None + + if_case: Optional[str] = None + + instruction: Optional[str] = None + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: Optional[int] = None + + max_tokens: Optional[int] = None + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. 
+ """ + + model: Optional[APIModel] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. + """ + + template: Optional[AgentTemplate] = None + + top_p: Optional[float] = None + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentListResponse(BaseModel): + agents: Optional[List[Agent]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py new file mode 100644 index 00000000..3c671f1c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel + +__all__ = ["AgentRetrieveResponse"] + + +class AgentRetrieveResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent + +if PYDANTIC_V2: + AgentRetrieveResponse.model_rebuild() +else: + AgentRetrieveResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_params.py b/src/digitalocean_genai_sdk/types/genai/agent_update_params.py new file mode 100644 index 00000000..7d8b2616 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_params.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo +from .api_retrieval_method import APIRetrievalMethod + +__all__ = ["AgentUpdateParams"] + + +class AgentUpdateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: int + + max_tokens: int + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. 
+ """ + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + provide_citations: bool + + retrieval_method: APIRetrievalMethod + + tags: List[str] + + temperature: float + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. + """ + + top_p: float + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py new file mode 100644 index 00000000..35c64f79 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel + +__all__ = ["AgentUpdateResponse"] + + +class AgentUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent + +if PYDANTIC_V2: + AgentUpdateResponse.model_rebuild() +else: + AgentUpdateResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py b/src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py new file mode 100644 index 00000000..675b0043 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = ["AgentUpdateStatusParams"] + + +class AgentUpdateStatusParams(TypedDict, total=False): + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + + visibility: APIDeploymentVisibility diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py new file mode 100644 index 00000000..cfda9b73 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._compat import PYDANTIC_V2 +from ..._models import BaseModel + +__all__ = ["AgentUpdateStatusResponse"] + + +class AgentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent + +if PYDANTIC_V2: + AgentUpdateStatusResponse.model_rebuild() +else: + AgentUpdateStatusResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/__init__.py b/src/digitalocean_genai_sdk/types/genai/agents/__init__.py new file mode 100644 index 00000000..b9f85c2d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/__init__.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .api_meta import APIMeta as APIMeta +from .api_links import APILinks as APILinks +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .version_list_params import VersionListParams as VersionListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .version_list_response import VersionListResponse as VersionListResponse +from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams +from .function_create_params import FunctionCreateParams as FunctionCreateParams +from .function_update_params import FunctionUpdateParams as FunctionUpdateParams +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse +from .function_create_response import FunctionCreateResponse as FunctionCreateResponse +from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse +from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse +from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams +from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse +from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse +from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse +from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse +from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput +from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py b/src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py new file mode 100644 index 00000000..92828873 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ...._models import BaseModel + +__all__ = ["APIAgentAPIKeyInfo"] + + +class APIAgentAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + secret_key: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py new file mode 100644 index 00000000..764a7cd0 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py new file mode 100644 index 00000000..cba52af4 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py new file mode 100644 index 00000000..b0909300 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py new file mode 100644 index 00000000..6bba5e35 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .api_meta import APIMeta +from .api_links import APILinks +from ...._models import BaseModel +from .api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py new file mode 100644 index 00000000..a778c2c3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
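
The agent API-key endpoints above all hand back an `APIAgentAPIKeyInfo` envelope. A sketch of a create request plus a helper that pulls the secret out of the response (note that `body_agent_uuid` serializes as `agent_uuid`):

```python
from typing import Optional

from digitalocean_genai_sdk.types.genai.agents import (
    APIKeyCreateParams,
    APIKeyCreateResponse,
)

params: APIKeyCreateParams = {
    "body_agent_uuid": "11111111-1111-1111-1111-111111111111",  # sent as "agent_uuid"
    "name": "ci-key",
}


def secret_from(resp: APIKeyCreateResponse) -> Optional[str]:
    """Return the newly minted secret, if the envelope carries one."""
    return resp.api_key_info.secret_key if resp.api_key_info else None
```
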
+ +from typing import Optional + +from ...._models import BaseModel +from .api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyRegenerateResponse"] + + +class APIKeyRegenerateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py new file mode 100644 index 00000000..c426beeb --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py new file mode 100644 index 00000000..a2974a36 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyUpdateResponse"] + + +class APIKeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py new file mode 100644 index 00000000..631ca75d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["APILinkKnowledgeBaseOutput"] + + +class APILinkKnowledgeBaseOutput(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + APILinkKnowledgeBaseOutput.model_rebuild() +else: + APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_links.py b/src/digitalocean_genai_sdk/types/genai/agents/api_links.py new file mode 100644 index 00000000..4205a6e2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_links.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
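
`APILinkKnowledgeBaseOutput` repeats the forward-reference pattern used by every response that embeds an `APIAgent`: annotate with a string, import after the class body, then rebuild the model so pydantic can resolve the cycle. A standalone sketch of the same pattern with stand-in names:

```python
from typing import Optional

from pydantic import BaseModel


class AgentEnvelope(BaseModel):  # stand-in for the *Response models above
    # String annotation: the target model cannot be imported here without
    # creating a circular import.
    agent: Optional["APIAgent"] = None


class APIAgent(BaseModel):  # stand-in for genai/api_agent.py
    name: Optional[str] = None


# Rebuilding resolves the forward reference on pydantic v2; on v1 the
# equivalent call is AgentEnvelope.update_forward_refs().
AgentEnvelope.model_rebuild()
```
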
+ +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APILinks", "Pages"] + + +class Pages(BaseModel): + first: Optional[str] = None + + last: Optional[str] = None + + next: Optional[str] = None + + previous: Optional[str] = None + + +class APILinks(BaseModel): + pages: Optional[Pages] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_meta.py b/src/digitalocean_genai_sdk/types/genai/agents/api_meta.py new file mode 100644 index 00000000..513897f6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_meta.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APIMeta"] + + +class APIMeta(BaseModel): + page: Optional[int] = None + + pages: Optional[int] = None + + total: Optional[int] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py new file mode 100644 index 00000000..9eec78be --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["ChildAgentAddParams"] + + +class ChildAgentAddParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py new file mode 100644 index 00000000..38c04500 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["ChildAgentAddResponse"] + + +class ChildAgentAddResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None + """A unique identifier for the parent agent.""" diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py new file mode 100644 index 00000000..88f0d765 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
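
`APIMeta` and `APILinks` are the shared pagination envelope for the list responses in this package. A sketch that walks every page using the `page`/`pages` counters; `fetch_page` stands in for whichever client call actually returns an `AgentListResponse`:

```python
from typing import Callable

from digitalocean_genai_sdk.types.genai import AgentListResponse


def list_all_agents(fetch_page: Callable[[int], AgentListResponse]) -> list:
    """Collect agents across pages until APIMeta says this is the last one."""
    collected: list = []
    page = 1
    while True:
        resp = fetch_page(page)
        collected.extend(resp.agents or [])
        meta = resp.meta
        if meta is None or meta.pages is None or page >= meta.pages:
            break
        page += 1
    return collected
```
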
+ +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["ChildAgentDeleteResponse"] + + +class ChildAgentDeleteResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py new file mode 100644 index 00000000..96e84622 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["ChildAgentUpdateParams"] + + +class ChildAgentUpdateParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str + + uuid: str diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py new file mode 100644 index 00000000..94eca533 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["ChildAgentUpdateResponse"] + + +class ChildAgentUpdateResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None + """A unique identifier for the parent agent.""" + + rollback: Optional[bool] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py new file mode 100644 index 00000000..1768b9ab --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["ChildAgentViewResponse"] + + +class ChildAgentViewResponse(BaseModel): + children: Optional[List["APIAgent"]] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + ChildAgentViewResponse.model_rebuild() +else: + ChildAgentViewResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py b/src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py new file mode 100644 index 00000000..6ca55898 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
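
The child-agent types above describe routing between a parent agent and its children, keyed by `route_name` and an `if_case` condition. A sketch of an add-route body from `ChildAgentAddParams`; the uuids are placeholders, and note that the path and body both carry `parent_agent_uuid` through different aliases:

```python
from digitalocean_genai_sdk.types.genai.agents import ChildAgentAddParams

params: ChildAgentAddParams = {
    # Fills the URL path (alias "parent_agent_uuid"):
    "path_parent_agent_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
    # Sent in the request body under the same wire name:
    "body_parent_agent_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
    "body_child_agent_uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
    "route_name": "billing",
    "if_case": "use this route for billing questions",
}
```
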
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["FunctionCreateParams"] + + +class FunctionCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + input_schema: object + + output_schema: object diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py new file mode 100644 index 00000000..fb58caeb --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["FunctionCreateResponse"] + + +class FunctionCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + FunctionCreateResponse.model_rebuild() +else: + FunctionCreateResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py new file mode 100644 index 00000000..be5e1fda --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["FunctionDeleteResponse"] + + +class FunctionDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + FunctionDeleteResponse.model_rebuild() +else: + FunctionDeleteResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py b/src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py new file mode 100644 index 00000000..54ca15aa --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["FunctionUpdateParams"] + + +class FunctionUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] + + input_schema: object + + output_schema: object diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py new file mode 100644 index 00000000..b9970518 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
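
The function types above attach a Functions-backed tool to an agent, identified by a namespace and name and described by JSON-schema-style `input_schema`/`output_schema` objects. A hedged sketch of a `FunctionCreateParams` body; the namespace, function name, and schemas are illustrative:

```python
from digitalocean_genai_sdk.types.genai.agents import FunctionCreateParams

params: FunctionCreateParams = {
    "body_agent_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",  # sent as "agent_uuid"
    "function_name": "get_weather",
    "description": "Look up the current weather for a city.",
    "faas_namespace": "fn-namespace-placeholder",
    "faas_name": "default/get-weather",
    "input_schema": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
    "output_schema": {
        "type": "object",
        "properties": {"temp_c": {"type": "number"}},
    },
}
```
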
+ +from __future__ import annotations + +from typing import Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["FunctionUpdateResponse"] + + +class FunctionUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + FunctionUpdateResponse.model_rebuild() +else: + FunctionUpdateResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py new file mode 100644 index 00000000..e6d0c7ad --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._compat import PYDANTIC_V2 +from ...._models import BaseModel + +__all__ = ["KnowledgeBaseDetachResponse"] + + +class KnowledgeBaseDetachResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent + +if PYDANTIC_V2: + KnowledgeBaseDetachResponse.model_rebuild() +else: + KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py b/src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py new file mode 100644 index 00000000..a71fd022 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["VersionListParams"] + + +class VersionListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py b/src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py new file mode 100644 index 00000000..bbe749b1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py @@ -0,0 +1,118 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime + +from pydantic import Field as FieldInfo + +from .api_meta import APIMeta +from .api_links import APILinks +from ...._models import BaseModel +from ..api_retrieval_method import APIRetrievalMethod + +__all__ = [ + "VersionListResponse", + "AgentVersion", + "AgentVersionAttachedChildAgent", + "AgentVersionAttachedFunction", + "AgentVersionAttachedGuardrail", + "AgentVersionAttachedKnowledgebase", +] + + +class AgentVersionAttachedChildAgent(BaseModel): + agent_name: Optional[str] = None + + child_agent_uuid: Optional[str] = None + + if_case: Optional[str] = None + + is_deleted: Optional[bool] = None + + route_name: Optional[str] = None + + +class AgentVersionAttachedFunction(BaseModel): + description: Optional[str] = None + + faas_name: Optional[str] = None + + faas_namespace: Optional[str] = None + + is_deleted: Optional[bool] = None + + name: Optional[str] = None + + +class AgentVersionAttachedGuardrail(BaseModel): + is_deleted: Optional[bool] = None + + name: Optional[str] = None + + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class AgentVersionAttachedKnowledgebase(BaseModel): + is_deleted: Optional[bool] = None + + name: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentVersion(BaseModel): + id: Optional[str] = None + + agent_uuid: Optional[str] = None + + attached_child_agents: Optional[List[AgentVersionAttachedChildAgent]] = None + + attached_functions: Optional[List[AgentVersionAttachedFunction]] = None + + attached_guardrails: Optional[List[AgentVersionAttachedGuardrail]] = None + + attached_knowledgebases: Optional[List[AgentVersionAttachedKnowledgebase]] = None + + can_rollback: Optional[bool] = None + + created_at: Optional[datetime] = None + + created_by_email: Optional[str] = None + + currently_applied: Optional[bool] = None + + description: Optional[str] = None + + instruction: Optional[str] = None + + k: Optional[int] = None + + max_tokens: Optional[int] = None + + api_model_name: Optional[str] = FieldInfo(alias="model_name", default=None) + + name: Optional[str] = None + + provide_citations: Optional[bool] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + top_p: Optional[float] = None + + trigger_action: Optional[str] = None + + version_hash: Optional[str] = None + + +class VersionListResponse(BaseModel): + agent_versions: Optional[List[AgentVersion]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py b/src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py new file mode 100644 index 00000000..610d8670 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
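
`VersionListResponse` above exposes `can_rollback` and `currently_applied` per `AgentVersion`, which pairs naturally with the `VersionUpdateParams` body defined in the file that follows. A hedged sketch; whether `uuid` names the agent or the version is an assumption here:

```python
from typing import Optional

from digitalocean_genai_sdk.types.genai.agents import (
    VersionListResponse,
    VersionUpdateParams,
)


def rollback_body(listing: VersionListResponse) -> Optional[VersionUpdateParams]:
    """Target the first non-applied version that the API says can roll back."""
    for version in listing.agent_versions or []:
        if version.can_rollback and not version.currently_applied:
            return {
                # Assumption: "uuid" identifies the agent being rolled back.
                "body_uuid": version.agent_uuid or "",
                "version_hash": version.version_hash or "",
            }
    return None
```
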
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["VersionUpdateParams"]
+
+
+class VersionUpdateParams(TypedDict, total=False):
+    body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+
+    version_hash: str
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py
new file mode 100644
index 00000000..4cfa4045
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["VersionUpdateResponse", "AuditHeader"]
+
+
+class AuditHeader(BaseModel):
+    actor_id: Optional[str] = None
+
+    actor_ip: Optional[str] = None
+
+    actor_uuid: Optional[str] = None
+
+    context_urn: Optional[str] = None
+
+    origin_application: Optional[str] = None
+
+    user_id: Optional[str] = None
+
+    user_uuid: Optional[str] = None
+
+
+class VersionUpdateResponse(BaseModel):
+    audit_header: Optional[AuditHeader] = None
+    """An alternative way to provide auth information. For internal use only."""
+
+    version_hash: Optional[str] = None
diff --git a/src/digitalocean_genai_sdk/types/genai/api_agent.py b/src/digitalocean_genai_sdk/types/genai/api_agent.py
new file mode 100644
index 00000000..8a235001
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/api_agent.py
@@ -0,0 +1,286 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._compat import PYDANTIC_V2
+from ..._models import BaseModel
+from .api_model import APIModel
+from .api_knowledge_base import APIKnowledgeBase
+from .api_retrieval_method import APIRetrievalMethod
+from .api_deployment_visibility import APIDeploymentVisibility
+from .agents.api_agent_api_key_info import APIAgentAPIKeyInfo
+from .providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo
+from .providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = [
+    "APIAgent",
+    "APIKey",
+    "Chatbot",
+    "ChatbotIdentifier",
+    "Deployment",
+    "Function",
+    "Guardrail",
+    "Template",
+    "TemplateGuardrail",
+]
+
+
+class APIKey(BaseModel):
+    api_key: Optional[str] = None
+
+
+class Chatbot(BaseModel):
+    button_background_color: Optional[str] = None
+
+    logo: Optional[str] = None
+
+    name: Optional[str] = None
+
+    primary_color: Optional[str] = None
+
+    secondary_color: Optional[str] = None
+
+    starting_message: Optional[str] = None
+
+
+class ChatbotIdentifier(BaseModel):
+    agent_chatbot_identifier: Optional[str] = None
+
+
+class Deployment(BaseModel):
+    created_at: Optional[datetime] = None
+
+    name: Optional[str] = None
+
+    status: Optional[
+        Literal[
+            "STATUS_UNKNOWN",
+            "STATUS_WAITING_FOR_DEPLOYMENT",
+            "STATUS_DEPLOYING",
+            "STATUS_RUNNING",
+            "STATUS_FAILED",
+            "STATUS_WAITING_FOR_UNDEPLOYMENT",
+            "STATUS_UNDEPLOYING",
+            "STATUS_UNDEPLOYMENT_FAILED",
+            "STATUS_DELETED",
+        ]
+    ] = None
+
+    updated_at: Optional[datetime] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+    visibility: Optional[APIDeploymentVisibility] = None
+
+
+class Function(BaseModel):
+    api_key: Optional[str] = None
+
+    created_at: Optional[datetime] = None
+
+    created_by: Optional[str] = None
+
+    description: Optional[str] = None
+
+    faas_name: Optional[str] = None
+
+    faas_namespace: Optional[str] = None
+
+    input_schema: Optional[object] = None
+
+    name: Optional[str] = None
+
+    output_schema: Optional[object] = None
+
+    updated_at: Optional[datetime] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class Guardrail(BaseModel):
+    agent_uuid: Optional[str] = None
+
+    created_at: Optional[datetime] = None
+
+    default_response: Optional[str] = None
+
+    description: Optional[str] = None
+
+    guardrail_uuid: Optional[str] = None
+
+    is_attached: Optional[bool] = None
+
+    is_default: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    priority: Optional[int] = None
+
+    type: Optional[
+        Literal[
+            "GUARDRAIL_TYPE_UNKNOWN",
+            "GUARDRAIL_TYPE_JAILBREAK",
+            "GUARDRAIL_TYPE_SENSITIVE_DATA",
+            "GUARDRAIL_TYPE_CONTENT_MODERATION",
+        ]
+    ] = None
+
+    updated_at: Optional[datetime] = None
+
+    uuid: Optional[str] = None
+
+
+class TemplateGuardrail(BaseModel):
+    priority: Optional[int] = None
+
+    uuid: Optional[str] = None
+
+
+class Template(BaseModel):
+    created_at: Optional[datetime] = None
+
+    description: Optional[str] = None
+
+    guardrails: Optional[List[TemplateGuardrail]] = None
+
+    instruction: Optional[str] = None
+
+    k: Optional[int] = None
+
+    knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+
+    long_description: Optional[str] = None
+
+    max_tokens: Optional[int] = None
+
+    model: Optional[APIModel] = None
+
+    name: Optional[str] = None
+
+    short_description: Optional[str] = None
+
+    summary: Optional[str] = None
+
+    tags: Optional[List[str]] = None
+
+    temperature: Optional[float] = None
+
+    template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None
+
+    top_p: Optional[float] = None
+
+    updated_at: Optional[datetime] = None
+
+    uuid: Optional[str] = None
+
+
+class APIAgent(BaseModel):
+    anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None
+
+    api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None
+
+    api_keys: Optional[List[APIKey]] = None
+
+    chatbot: Optional[Chatbot] = None
+
+    chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None
+
+    child_agents: Optional[List["APIAgent"]] = None
+
+    created_at: Optional[datetime] = None
+
+    deployment: Optional[Deployment] = None
+
+    description: Optional[str] = None
+
+    functions: Optional[List[Function]] = None
+
+    guardrails: Optional[List[Guardrail]] = None
+
+    if_case: Optional[str] = None
+
+    instruction: Optional[str] = None
+    """Agent instruction.
+
+    Instructions help your agent to perform its job effectively. See
+    [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions)
+    for best practices.
+ """ + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None + + parent_agents: Optional[List["APIAgent"]] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template: Optional[Template] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None + + +if PYDANTIC_V2: + APIAgent.model_rebuild() + APIKey.model_rebuild() + Chatbot.model_rebuild() + ChatbotIdentifier.model_rebuild() + Deployment.model_rebuild() + Function.model_rebuild() + Guardrail.model_rebuild() + Template.model_rebuild() + TemplateGuardrail.model_rebuild() +else: + APIAgent.update_forward_refs() # type: ignore + APIKey.update_forward_refs() # type: ignore + Chatbot.update_forward_refs() # type: ignore + ChatbotIdentifier.update_forward_refs() # type: ignore + Deployment.update_forward_refs() # type: ignore + Function.update_forward_refs() # type: ignore + Guardrail.update_forward_refs() # type: ignore + Template.update_forward_refs() # type: ignore + TemplateGuardrail.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/api_agreement.py b/src/digitalocean_genai_sdk/types/genai/api_agreement.py new file mode 100644 index 00000000..512ec34a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_agreement.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APIAgreement"] + + +class APIAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py b/src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py new file mode 100644 index 00000000..a63e3acd --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["APIDeploymentVisibility"] + +APIDeploymentVisibility: TypeAlias = Literal[ + "VISIBILITY_UNKNOWN", "VISIBILITY_DISABLED", "VISIBILITY_PLAYGROUND", "VISIBILITY_PUBLIC", "VISIBILITY_PRIVATE" +] diff --git a/src/digitalocean_genai_sdk/types/genai/api_indexing_job.py b/src/digitalocean_genai_sdk/types/genai/api_indexing_job.py new file mode 100644 index 00000000..2809141c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_indexing_job.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
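+#
+# Note on ``api_agent.py`` above: ``APIAgent`` references itself via string
+# annotations (``child_agents``/``parent_agents``), so the module calls
+# ``model_rebuild()`` on Pydantic v2 (or ``update_forward_refs()`` on v1) once
+# all names exist. A standalone illustration of the same pattern, independent
+# of this SDK:
+#
+#     from typing import List, Optional
+#     from pydantic import BaseModel
+#
+#     class Node(BaseModel):
+#         children: Optional[List["Node"]] = None
+#
+#     Node.model_rebuild()  # Pydantic v1: Node.update_forward_refs()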
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["APIIndexingJob"] + + +class APIIndexingJob(BaseModel): + completed_datasources: Optional[int] = None + + created_at: Optional[datetime] = None + + data_source_uuids: Optional[List[str]] = None + + finished_at: Optional[datetime] = None + + knowledge_base_uuid: Optional[str] = None + + phase: Optional[ + Literal[ + "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", + "BATCH_JOB_PHASE_CANCELLED", + ] + ] = None + + started_at: Optional[datetime] = None + + tokens: Optional[int] = None + + total_datasources: Optional[int] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py b/src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py new file mode 100644 index 00000000..1ea8c901 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["APIKnowledgeBase"] + + +class APIKnowledgeBase(BaseModel): + added_to_agent_at: Optional[datetime] = None + + created_at: Optional[datetime] = None + + database_id: Optional[str] = None + + embedding_model_uuid: Optional[str] = None + + is_public: Optional[bool] = None + + last_indexing_job: Optional[APIIndexingJob] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + region: Optional[str] = None + + tags: Optional[List[str]] = None + + updated_at: Optional[datetime] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/api_model.py b/src/digitalocean_genai_sdk/types/genai/api_model.py new file mode 100644 index 00000000..38efa6dc --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_model.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
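+#
+# The ``phase`` values on ``APIIndexingJob`` in ``api_indexing_job.py`` above
+# lend themselves to a simple poll loop. A sketch, assuming a configured
+# ``client`` (accessor names are illustrative, not confirmed by this patch):
+#
+#     import time
+#
+#     TERMINAL = {"BATCH_JOB_PHASE_SUCCEEDED", "BATCH_JOB_PHASE_FAILED",
+#                 "BATCH_JOB_PHASE_ERROR", "BATCH_JOB_PHASE_CANCELLED"}
+#     while True:
+#         job = client.genai.indexing_jobs.retrieve(job_uuid).job
+#         if job is None or job.phase in TERMINAL:
+#             break
+#         time.sleep(5)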
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIModel"] + + +class APIModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/digitalocean_genai_sdk/types/genai/api_model_version.py b/src/digitalocean_genai_sdk/types/genai/api_model_version.py new file mode 100644 index 00000000..c96a4faf --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_model_version.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APIModelVersion"] + + +class APIModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None diff --git a/src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py b/src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py new file mode 100644 index 00000000..9d92838e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["APIRetrievalMethod"] + +APIRetrievalMethod: TypeAlias = Literal[ + "RETRIEVAL_METHOD_UNKNOWN", + "RETRIEVAL_METHOD_REWRITE", + "RETRIEVAL_METHOD_STEP_BACK", + "RETRIEVAL_METHOD_SUB_QUERIES", + "RETRIEVAL_METHOD_NONE", +] diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/__init__.py b/src/digitalocean_genai_sdk/types/genai/auth/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/fine_tuning/__init__.py rename to src/digitalocean_genai_sdk/types/genai/auth/__init__.py diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py b/src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py new file mode 100644 index 00000000..9fae55b6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .token_create_params import TokenCreateParams as TokenCreateParams +from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py b/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py new file mode 100644 index 00000000..aa95c9c2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["TokenCreateParams"] + + +class TokenCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py b/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py new file mode 100644 index 00000000..018d4755 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel + +__all__ = ["TokenCreateResponse"] + + +class TokenCreateResponse(BaseModel): + access_token: Optional[str] = None + + refresh_token: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py new file mode 100644 index 00000000..04838472 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["IndexingJobCreateParams"] + + +class IndexingJobCreateParams(TypedDict, total=False): + data_source_uuids: List[str] + + knowledge_base_uuid: str diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py new file mode 100644 index 00000000..835ec60d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobCreateResponse"] + + +class IndexingJobCreateResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py new file mode 100644 index 00000000..90206aba --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
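+#
+# Creating a job with the params defined in ``indexing_job_create_params.py``
+# above might look like this (accessor names are assumptions for
+# illustration):
+#
+#     resp = client.genai.indexing_jobs.create(
+#         knowledge_base_uuid="kb-uuid",
+#         data_source_uuids=["ds-uuid-1", "ds-uuid-2"],
+#     )
+#     print(resp.job.uuid if resp.job else "no job returned")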
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["IndexingJobListParams"] + + +class IndexingJobListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py new file mode 100644 index 00000000..b6d62f88 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobListResponse"] + + +class IndexingJobListResponse(BaseModel): + jobs: Optional[List[APIIndexingJob]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py new file mode 100644 index 00000000..a9d0c2c0 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] + + +class IndexedDataSource(BaseModel): + completed_at: Optional[datetime] = None + + data_source_uuid: Optional[str] = None + + error_details: Optional[str] = None + + error_msg: Optional[str] = None + + failed_item_count: Optional[str] = None + + indexed_file_count: Optional[str] = None + + indexed_item_count: Optional[str] = None + + removed_item_count: Optional[str] = None + + skipped_item_count: Optional[str] = None + + started_at: Optional[datetime] = None + + status: Optional[ + Literal[ + "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", + "DATA_SOURCE_STATUS_FAILED", + ] + ] = None + + total_bytes: Optional[str] = None + + total_bytes_indexed: Optional[str] = None + + total_file_count: Optional[str] = None + + +class IndexingJobRetrieveDataSourcesResponse(BaseModel): + indexed_data_sources: Optional[List[IndexedDataSource]] = None diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py new file mode 100644 index 00000000..6034bdf1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
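+#
+# Note that the per-data-source counters above (``indexed_item_count``,
+# ``total_bytes``, ...) are typed ``Optional[str]``, presumably because 64-bit
+# integers are serialized as strings in JSON; convert before doing arithmetic.
+# A sketch (the method name is an assumption for illustration):
+#
+#     resp = client.genai.indexing_jobs.retrieve_data_sources(job_uuid)
+#     for ds in resp.indexed_data_sources or []:
+#         done = int(ds.indexed_item_count or 0)
+#         failed = int(ds.failed_item_count or 0)
+#         print(ds.data_source_uuid, done, failed)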
+ +from typing import Optional + +from ..._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobRetrieveResponse"] + + +class IndexingJobRetrieveResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py new file mode 100644 index 00000000..9359a42a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["IndexingJobUpdateCancelParams"] + + +class IndexingJobUpdateCancelParams(TypedDict, total=False): + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """A unique identifier for an indexing job.""" diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py b/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py new file mode 100644 index 00000000..ae4b394f --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobUpdateCancelResponse"] + + +class IndexingJobUpdateCancelResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py new file mode 100644 index 00000000..3a58166b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py @@ -0,0 +1,64 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import TypedDict + +from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam +from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam +from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["KnowledgeBaseCreateParams", "Datasource"] + + +class KnowledgeBaseCreateParams(TypedDict, total=False): + database_id: str + """ + Identifier of the DigitalOcean OpenSearch database this knowledge base will use, + optional. If not provided, we create a new database for the knowledge base in + the same region as the knowledge base. + """ + + datasources: Iterable[Datasource] + """The data sources to use for this knowledge base. + + See + [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) + for more information on data sources best practices. + """ + + embedding_model_uuid: str + """ + Identifier for the + [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). 
+ """ + + name: str + """Name of the knowledge base.""" + + project_id: str + """Identifier of the DigitalOcean project this knowledge base will belong to.""" + + region: str + """The datacenter region to deploy the knowledge base in.""" + + tags: List[str] + """Tags to organize your knowledge base.""" + + vpc_uuid: str + + +class Datasource(TypedDict, total=False): + bucket_name: str + + bucket_region: str + + file_upload_data_source: APIFileUploadDataSourceParam + """File to upload as data source for knowledge base.""" + + item_path: str + + spaces_data_source: APISpacesDataSourceParam + + web_crawler_data_source: APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py new file mode 100644 index 00000000..ec69972a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_knowledge_base import APIKnowledgeBase + +__all__ = ["KnowledgeBaseCreateResponse"] + + +class KnowledgeBaseCreateResponse(BaseModel): + knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py new file mode 100644 index 00000000..486423d3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["KnowledgeBaseDeleteResponse"] + + +class KnowledgeBaseDeleteResponse(BaseModel): + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py new file mode 100644 index 00000000..dcf9a0ec --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KnowledgeBaseListParams"] + + +class KnowledgeBaseListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py new file mode 100644 index 00000000..0db1bc1c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
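+#
+# A create call using ``knowledge_base_create_params.py`` above might look like
+# this (accessor names and the region value are assumptions for illustration):
+#
+#     kb = client.genai.knowledge_bases.create(
+#         name="docs-kb",
+#         region="tor1",
+#         datasources=[{
+#             "web_crawler_data_source": {
+#                 "base_url": "https://example.com/docs/",
+#                 "crawling_option": "PATH",
+#             },
+#         }],
+#     ).knowledge_base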
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseListResponse"]
+
+
+class KnowledgeBaseListResponse(BaseModel):
+    knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py
new file mode 100644
index 00000000..e139049d
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseRetrieveResponse"]
+
+
+class KnowledgeBaseRetrieveResponse(BaseModel):
+    database_status: Optional[
+        Literal[
+            "CREATING",
+            "ONLINE",
+            "POWEROFF",
+            "REBUILDING",
+            "REBALANCING",
+            "DECOMMISSIONED",
+            "FORKING",
+            "MIGRATING",
+            "RESIZING",
+            "RESTORING",
+            "POWERING_ON",
+            "UNHEALTHY",
+        ]
+    ] = None
+
+    knowledge_base: Optional[APIKnowledgeBase] = None
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py
new file mode 100644
index 00000000..18bf442d
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+
+__all__ = ["KnowledgeBaseUpdateParams"]
+
+
+class KnowledgeBaseUpdateParams(TypedDict, total=False):
+    database_id: str
+    """The ID of the DigitalOcean database this knowledge base will use, optional."""
+
+    embedding_model_uuid: str
+    """Identifier for the foundation model."""
+
+    name: str
+
+    project_id: str
+
+    tags: List[str]
+    """Tags to organize your knowledge base."""
+
+    body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py
new file mode 100644
index 00000000..2c8bfcce
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseUpdateResponse"]
+
+
+class KnowledgeBaseUpdateResponse(BaseModel):
+    knowledge_base: Optional[APIKnowledgeBase] = None
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py
new file mode 100644
index 00000000..f5f31034
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource +from .data_source_list_params import DataSourceListParams as DataSourceListParams +from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams +from .data_source_list_response import DataSourceListResponse as DataSourceListResponse +from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource +from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource +from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse +from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse +from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource +from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam +from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py new file mode 100644 index 00000000..f3cf81ec --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APIFileUploadDataSource"] + + +class APIFileUploadDataSource(BaseModel): + original_file_name: Optional[str] = None + + size_in_bytes: Optional[str] = None + + stored_object_key: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py new file mode 100644 index 00000000..37221059 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIFileUploadDataSourceParam"] + + +class APIFileUploadDataSourceParam(TypedDict, total=False): + original_file_name: str + + size_in_bytes: str + + stored_object_key: str diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py new file mode 100644 index 00000000..b281888d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
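+#
+# Naming convention visible in ``__init__.py`` above: every wire type ships
+# both as a ``BaseModel`` for responses (e.g. ``APISpacesDataSource``) and as a
+# ``TypedDict`` request variant suffixed ``Param`` (e.g.
+# ``APISpacesDataSourceParam``), so requests take plain dicts:
+#
+#     from .api_spaces_data_source_param import APISpacesDataSourceParam
+#
+#     source: APISpacesDataSourceParam = {"bucket_name": "docs", "region": "tor1"}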
+ +from typing import Optional +from datetime import datetime + +from ...._models import BaseModel +from ..api_indexing_job import APIIndexingJob +from .api_spaces_data_source import APISpacesDataSource +from .api_file_upload_data_source import APIFileUploadDataSource +from .api_web_crawler_data_source import APIWebCrawlerDataSource + +__all__ = ["APIKnowledgeBaseDataSource"] + + +class APIKnowledgeBaseDataSource(BaseModel): + bucket_name: Optional[str] = None + + created_at: Optional[datetime] = None + + file_upload_data_source: Optional[APIFileUploadDataSource] = None + """File to upload as data source for knowledge base.""" + + item_path: Optional[str] = None + + last_indexing_job: Optional[APIIndexingJob] = None + + region: Optional[str] = None + + spaces_data_source: Optional[APISpacesDataSource] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py new file mode 100644 index 00000000..e35b5cbe --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APISpacesDataSource"] + + +class APISpacesDataSource(BaseModel): + bucket_name: Optional[str] = None + + item_path: Optional[str] = None + + region: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py new file mode 100644 index 00000000..b7f2f657 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APISpacesDataSourceParam"] + + +class APISpacesDataSourceParam(TypedDict, total=False): + bucket_name: str + + item_path: str + + region: str diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py new file mode 100644 index 00000000..d2db09ff --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["APIWebCrawlerDataSource"] + + +class APIWebCrawlerDataSource(BaseModel): + base_url: Optional[str] = None + """The base url to crawl.""" + + crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None + """Options for specifying how URLs found on pages should be handled. + + - UNKNOWN: Default unknown value + - SCOPED: Only include the base URL. + - PATH: Crawl the base URL and linked pages within the URL path. + - DOMAIN: Crawl the base URL and linked pages within the same domain. + - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. 
+ """ + + embed_media: Optional[bool] = None + """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py new file mode 100644 index 00000000..2345ed3a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["APIWebCrawlerDataSourceParam"] + + +class APIWebCrawlerDataSourceParam(TypedDict, total=False): + base_url: str + """The base url to crawl.""" + + crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"] + """Options for specifying how URLs found on pages should be handled. + + - UNKNOWN: Default unknown value + - SCOPED: Only include the base URL. + - PATH: Crawl the base URL and linked pages within the URL path. + - DOMAIN: Crawl the base URL and linked pages within the same domain. + - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. + """ + + embed_media: bool + """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py new file mode 100644 index 00000000..5363eb15 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo +from .api_spaces_data_source_param import APISpacesDataSourceParam +from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["DataSourceCreateParams", "AwsDataSource"] + + +class DataSourceCreateParams(TypedDict, total=False): + aws_data_source: AwsDataSource + + body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] + + spaces_data_source: APISpacesDataSourceParam + + web_crawler_data_source: APIWebCrawlerDataSourceParam + + +class AwsDataSource(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py new file mode 100644 index 00000000..694c7a29 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
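+#
+# Attaching a crawler source with ``data_source_create_params.py`` above might
+# look like this (accessor and path-parameter names are assumptions for
+# illustration):
+#
+#     resp = client.genai.knowledge_bases.data_sources.create(
+#         "kb-uuid",                       # knowledge base path parameter
+#         web_crawler_data_source={
+#             "base_url": "https://example.com/docs/",
+#             "crawling_option": "PATH",   # only follow links under /docs/
+#             "embed_media": False,
+#         },
+#     )
+#     print(resp.knowledge_base_data_source)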
+ +from typing import Optional + +from ...._models import BaseModel +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource + +__all__ = ["DataSourceCreateResponse"] + + +class DataSourceCreateResponse(BaseModel): + knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py new file mode 100644 index 00000000..e04383bc --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["DataSourceDeleteResponse"] + + +class DataSourceDeleteResponse(BaseModel): + data_source_uuid: Optional[str] = None + + knowledge_base_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py new file mode 100644 index 00000000..e3ed5e3c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["DataSourceListParams"] + + +class DataSourceListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py new file mode 100644 index 00000000..872a7923 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource + +__all__ = ["DataSourceListResponse"] + + +class DataSourceListResponse(BaseModel): + knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/model_list_params.py b/src/digitalocean_genai_sdk/types/genai/model_list_params.py new file mode 100644 index 00000000..4abc1dc1 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/model_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ModelListParams"]
+
+
+class ModelListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
+
+    public_only: bool
+    """only include models that are publicly available."""
+
+    usecases: List[
+        Literal[
+            "MODEL_USECASE_UNKNOWN",
+            "MODEL_USECASE_AGENT",
+            "MODEL_USECASE_FINETUNED",
+            "MODEL_USECASE_KNOWLEDGEBASE",
+            "MODEL_USECASE_GUARDRAIL",
+            "MODEL_USECASE_REASONING",
+            "MODEL_USECASE_SERVERLESS",
+        ]
+    ]
+    """include only models defined for the listed usecases.
+
+    - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+    - MODEL_USECASE_AGENT: The model may be used in an agent
+    - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+    - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+      (embedding models)
+    - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+    - MODEL_USECASE_REASONING: The model may be used for reasoning
+    - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+    """
diff --git a/src/digitalocean_genai_sdk/types/genai/model_list_response.py b/src/digitalocean_genai_sdk/types/genai/model_list_response.py
new file mode 100644
index 00000000..ef9ea0f3
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/model_list_response.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+from .api_agreement import APIAgreement
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
+from .api_model_version import APIModelVersion
+
+__all__ = ["ModelListResponse", "Model"]
+
+
+class Model(BaseModel):
+    agreement: Optional[APIAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    is_foundational: Optional[bool] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[APIModelVersion] = None
+
+
+class ModelListResponse(BaseModel):
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
+
+    models: Optional[List[Model]] = None
diff --git a/src/digitalocean_genai_sdk/types/genai/models/__init__.py b/src/digitalocean_genai_sdk/types/genai/models/__init__.py
new file mode 100644
index 00000000..c3cbcd6d
--- /dev/null
+++ b/src/digitalocean_genai_sdk/types/genai/models/__init__.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
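+#
+# Filtering the model catalog with ``model_list_params.py`` above might look
+# like this (accessor names are assumptions for illustration):
+#
+#     resp = client.genai.models.list(
+#         public_only=True,
+#         usecases=["MODEL_USECASE_KNOWLEDGEBASE"],  # embedding models only
+#     )
+#     for m in resp.models or []:
+#         print(m.name, m.uuid)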
+ +from __future__ import annotations + +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py new file mode 100644 index 00000000..16cc23c9 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py new file mode 100644 index 00000000..3840dd85 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py new file mode 100644 index 00000000..5788a51e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py new file mode 100644 index 00000000..966d7e49 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py new file mode 100644 index 00000000..3712576c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py new file mode 100644 index 00000000..dd6e050b --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyUpdateRegenerateResponse"] + + +class APIKeyUpdateRegenerateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py new file mode 100644 index 00000000..62ca0dfe --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyUpdateResponse"] + + +class APIKeyUpdateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py b/src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py new file mode 100644 index 00000000..fd7545b3 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime + +from ...._models import BaseModel + +__all__ = ["APIModelAPIKeyInfo"] + + +class APIModelAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + secret_key: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py b/src/digitalocean_genai_sdk/types/genai/providers/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/fine_tuning/checkpoints/__init__.py rename to src/digitalocean_genai_sdk/types/genai/providers/__init__.py diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py new file mode 100644 index 00000000..019eab26 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams +from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py new file mode 100644 index 00000000..e18489b2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ....._models import BaseModel + +__all__ = ["APIAnthropicAPIKeyInfo"] + + +class APIAnthropicAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
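+#
+# Registering a provider key with the params below might look like this (the
+# accessor names and key placeholder are assumptions for illustration):
+#
+#     key = client.genai.providers.anthropic.keys.create(
+#         api_key="sk-ant-...",
+#         name="prod-anthropic",
+#     ).api_key_info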
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py new file mode 100644 index 00000000..612baa41 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py new file mode 100644 index 00000000..9ad9f478 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py new file mode 100644 index 00000000..ebbc3b7e --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListAgentsParams"] + + +class KeyListAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py new file mode 100644 index 00000000..4c2453f6 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional + +from ....._compat import PYDANTIC_V2 +from ....._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyListAgentsResponse"] + + +class KeyListAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent + +if PYDANTIC_V2: + KeyListAgentsResponse.model_rebuild() +else: + KeyListAgentsResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py new file mode 100644 index 00000000..47f9cc3a --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ....._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py new file mode 100644 index 00000000..39141a92 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py new file mode 100644 index 00000000..a9011e00 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
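+#
+# Note: `body_api_key_uuid` is serialized as "api_key_uuid" in the request
+# body via PropertyInfo(alias=...); the `body_` prefix most likely exists to
+# keep it distinct from the endpoint's `api_key_uuid` path parameter.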
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py new file mode 100644 index 00000000..e8a5d19d --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py new file mode 100644 index 00000000..46746a06 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo +from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams +from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py new file mode 100644 index 00000000..7a4267a0 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime + +from ....._models import BaseModel +from ...api_model import APIModel + +__all__ = ["APIOpenAIAPIKeyInfo"] + + +class APIOpenAIAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + models: Optional[List[APIModel]] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py new file mode 100644 index 00000000..539cebf2 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py new file mode 100644 index 00000000..fcf6bf04 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py new file mode 100644 index 00000000..2ce69038 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
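+#
+# A paginated envelope: `api_key_infos` holds the current page, while the
+# shared agents `meta`/`links` models describe totals and page navigation.
+# Hypothetical iteration sketch (assumes a configured client and resource
+# accessor):
+#
+#     page = client.genai.providers.openai.keys.list(page=1, per_page=50)
+#     for info in page.api_key_infos or []:
+#         print(info.uuid, info.name)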
+ +from typing import List, Optional + +from ....._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py new file mode 100644 index 00000000..ec745d14 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyRetrieveAgentsParams"] + + +class KeyRetrieveAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py new file mode 100644 index 00000000..af1fdf37 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ....._compat import PYDANTIC_V2 +from ....._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyRetrieveAgentsResponse"] + + +class KeyRetrieveAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent + +if PYDANTIC_V2: + KeyRetrieveAgentsResponse.model_rebuild() +else: + KeyRetrieveAgentsResponse.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py new file mode 100644 index 00000000..156edaeb --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py new file mode 100644 index 00000000..a9011e00 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py new file mode 100644 index 00000000..bfb5faae --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py b/src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py new file mode 100644 index 00000000..678c4ead --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GenaiRetrieveRegionsParams"] + + +class GenaiRetrieveRegionsParams(TypedDict, total=False): + serves_batch: bool + """include datacenters that are capable of running batch jobs.""" + + serves_inference: bool + """include datacenters that serve inference.""" diff --git a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py b/src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py new file mode 100644 index 00000000..e4b21d51 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel + +__all__ = ["GenaiRetrieveRegionsResponse", "Region"] + + +class Region(BaseModel): + inference_url: Optional[str] = None + + region: Optional[str] = None + + serves_batch: Optional[bool] = None + + serves_inference: Optional[bool] = None + + stream_inference_url: Optional[str] = None + + +class GenaiRetrieveRegionsResponse(BaseModel): + regions: Optional[List[Region]] = None diff --git a/src/digitalocean_genai_sdk/types/stop_configuration_param.py b/src/digitalocean_genai_sdk/types/stop_configuration_param.py deleted file mode 100644 index d3093c7c..00000000 --- a/src/digitalocean_genai_sdk/types/stop_configuration_param.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import TypeAlias - -__all__ = ["StopConfigurationParam"] - -StopConfigurationParam: TypeAlias = Union[Optional[str], List[str]] diff --git a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py b/src/digitalocean_genai_sdk/types/threads/runs/__init__.py deleted file mode 100644 index f8ee8b14..00000000 --- a/src/digitalocean_genai_sdk/types/threads/runs/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py b/src/digitalocean_genai_sdk/types/vector_stores/__init__.py deleted file mode 100644 index f8ee8b14..00000000 --- a/src/digitalocean_genai_sdk/types/vector_stores/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/chat/__init__.py b/tests/api_resources/genai/__init__.py similarity index 100% rename from tests/api_resources/chat/__init__.py rename to tests/api_resources/genai/__init__.py diff --git a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py b/tests/api_resources/genai/agents/__init__.py similarity index 70% rename from src/digitalocean_genai_sdk/types/organization/projects/__init__.py rename to tests/api_resources/genai/agents/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/digitalocean_genai_sdk/types/organization/projects/__init__.py +++ b/tests/api_resources/genai/agents/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/genai/agents/test_api_keys.py b/tests/api_resources/genai/agents/test_api_keys.py new file mode 100644 index 00000000..2b55534a --- /dev/null +++ b/tests/api_resources/genai/agents/test_api_keys.py @@ -0,0 +1,572 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.agents import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, + APIKeyRegenerateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + 
api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_streaming_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = 
await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/genai/agents/test_child_agents.py b/tests/api_resources/genai/agents/test_child_agents.py new file mode 100644 index 00000000..ee94b3fb --- /dev/null +++ b/tests/api_resources/genai/agents/test_child_agents.py @@ -0,0 +1,485 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.agents import ( + ChildAgentAddResponse, + ChildAgentViewResponse, + ChildAgentDeleteResponse, + ChildAgentUpdateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestChildAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.child_agents.with_streaming_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.child_agents.with_streaming_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): + client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): + client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="", + parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_add_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + 
route_name="route_name", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.child_agents.with_streaming_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_view(self, client: DigitaloceanGenaiSDK) -> None: + child_agent = client.genai.agents.child_agents.view( + "uuid", + ) + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_view(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.child_agents.with_raw_response.view( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_view(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.child_agents.with_streaming_response.view( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_view(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.agents.child_agents.with_raw_response.view( + "", + ) + + +class TestAsyncChildAgents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await 
async_client.genai.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await async_client.genai.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.child_agents.with_streaming_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + await async_client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + await async_client.genai.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await async_client.genai.agents.child_agents.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.child_agents.with_streaming_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): + await async_client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): + await async_client.genai.agents.child_agents.with_raw_response.delete( + child_agent_uuid="", + parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await async_client.genai.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_add_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await async_client.genai.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.child_agents.with_streaming_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + await async_client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + 
path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + await async_client.genai.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + child_agent = await async_client.genai.agents.child_agents.view( + "uuid", + ) + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.child_agents.with_raw_response.view( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.child_agents.with_streaming_response.view( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.agents.child_agents.with_raw_response.view( + "", + ) diff --git a/tests/api_resources/genai/agents/test_functions.py b/tests/api_resources/genai/agents/test_functions.py new file mode 100644 index 00000000..c2e470df --- /dev/null +++ b/tests/api_resources/genai/agents/test_functions.py @@ -0,0 +1,382 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
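+#
+# Like the other generated suites in this directory, every test below is
+# decorated with @pytest.mark.skip() and is written to run against a mock of
+# the API at TEST_API_BASE_URL (default http://127.0.0.1:4010).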
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.agents import ( + FunctionCreateResponse, + FunctionDeleteResponse, + FunctionUpdateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFunctions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + function = client.genai.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + function = client.genai.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.functions.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.genai.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + function = client.genai.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + function = client.genai.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + 
response = client.genai.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.genai.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + client.genai.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + function = client.genai.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): + client.genai.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) + + +class TestAsyncFunctions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + function = await async_client.genai.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + function = await async_client.genai.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.functions.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.genai.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + function = await async_client.genai.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + function = await async_client.genai.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.genai.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + await async_client.genai.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + function = await async_client.genai.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): + await async_client.genai.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/genai/agents/test_knowledge_bases.py b/tests/api_resources/genai/agents/test_knowledge_bases.py new file mode 100644 index 00000000..ac7d97d1 --- /dev/null +++ b/tests/api_resources/genai/agents/test_knowledge_bases.py @@ -0,0 +1,314 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
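A note on the parameter names in the function tests above: when the same identifier travels in both the URL path and the request body, the generated SDK disambiguates with path_ and body_ prefixes (path_agent_uuid vs. body_agent_uuid). A sketch with placeholder values, assuming a configured sync client:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

function = client.genai.agents.functions.create(
    path_agent_uuid="agent_uuid",  # used to build the request URL
    body_agent_uuid="agent_uuid",  # repeated in the JSON payload
    function_name="function_name",
    faas_name="faas_name",
    faas_namespace="faas_namespace",
    input_schema={},
    output_schema={},
)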
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKnowledgeBases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_attach(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_attach(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_attach(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_attach(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.knowledge_bases.with_raw_response.attach( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_attach_single(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.agents.knowledge_bases.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.knowledge_bases.with_streaming_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
def test_path_params_attach_single(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_detach(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.agents.knowledge_bases.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_detach(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_detach(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.knowledge_bases.with_streaming_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_detach(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + + +class TestAsyncKnowledgeBases: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.knowledge_bases.with_raw_response.attach( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.agents.knowledge_bases.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.knowledge_bases.with_streaming_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.agents.knowledge_bases.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await 
async_client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.knowledge_bases.with_streaming_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.genai.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/genai/agents/test_versions.py b/tests/api_resources/genai/agents/test_versions.py new file mode 100644 index 00000000..d954927d --- /dev/null +++ b/tests/api_resources/genai/agents/test_versions.py @@ -0,0 +1,233 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
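The with_streaming_response assertions above all encode the same lifecycle: the HTTP body stays open inside the context manager and is closed automatically on exit. A standalone sketch of that pattern, with a placeholder agent UUID and an assumed configured client:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

with client.genai.agents.knowledge_bases.with_streaming_response.attach(
    "agent_uuid",
) as response:
    assert not response.is_closed      # body is still open here
    knowledge_base = response.parse()  # parse while the stream is available

assert response.is_closed  # the context manager closed the response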
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.agents import ( + VersionListResponse, + VersionUpdateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVersions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + version = client.genai.agents.versions.update( + path_uuid="uuid", + ) + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + version = client.genai.agents.versions.update( + path_uuid="uuid", + body_uuid="uuid", + version_hash="version_hash", + ) + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.versions.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.versions.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.genai.agents.versions.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + version = client.genai.agents.versions.list( + uuid="uuid", + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + version = client.genai.agents.versions.list( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.versions.with_raw_response.list( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.versions.with_streaming_response.list( + uuid="uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.agents.versions.with_raw_response.list( + uuid="", + ) + + +class TestAsyncVersions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + version = await async_client.genai.agents.versions.update( + path_uuid="uuid", + ) + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + version = await async_client.genai.agents.versions.update( + path_uuid="uuid", + body_uuid="uuid", + version_hash="version_hash", + ) + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.versions.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.versions.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(VersionUpdateResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.genai.agents.versions.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + version = await async_client.genai.agents.versions.list( + uuid="uuid", + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + version = await async_client.genai.agents.versions.list( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.versions.with_raw_response.list( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + version = await response.parse() + 
assert_matches_type(VersionListResponse, version, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.versions.with_streaming_response.list( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + version = await response.parse() + assert_matches_type(VersionListResponse, version, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.agents.versions.with_raw_response.list( + uuid="", + ) diff --git a/src/digitalocean_genai_sdk/types/threads/__init__.py b/tests/api_resources/genai/auth/__init__.py similarity index 70% rename from src/digitalocean_genai_sdk/types/threads/__init__.py rename to tests/api_resources/genai/auth/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/digitalocean_genai_sdk/types/threads/__init__.py +++ b/tests/api_resources/genai/auth/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py b/tests/api_resources/genai/auth/agents/__init__.py similarity index 70% rename from src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py rename to tests/api_resources/genai/auth/agents/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/digitalocean_genai_sdk/types/fine_tuning/jobs/__init__.py +++ b/tests/api_resources/genai/auth/agents/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/genai/auth/agents/test_token.py b/tests/api_resources/genai/auth/agents/test_token.py new file mode 100644 index 00000000..04cdd452 --- /dev/null +++ b/tests/api_resources/genai/auth/agents/test_token.py @@ -0,0 +1,124 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
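The version-listing tests above pass page and per_page straight through; the zeros are just exercise values from the generated fixtures. A sketch with more realistic placeholder paging, again assuming a configured client:

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()  # assumes credentials in the environment

versions = client.genai.agents.versions.list(
    uuid="uuid",  # agent UUID placeholder
    page=1,       # hypothetical paging values; the tests use 0
    per_page=25,
)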
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.auth.agents import TokenCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestToken: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + token = client.genai.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + token = client.genai.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.genai.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) + + +class TestAsyncToken: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + token = await async_client.genai.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + token = await async_client.genai.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + 
+ @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.genai.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) diff --git a/src/digitalocean_genai_sdk/types/organization/__init__.py b/tests/api_resources/genai/knowledge_bases/__init__.py similarity index 70% rename from src/digitalocean_genai_sdk/types/organization/__init__.py rename to tests/api_resources/genai/knowledge_bases/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/digitalocean_genai_sdk/types/organization/__init__.py +++ b/tests/api_resources/genai/knowledge_bases/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/genai/knowledge_bases/test_data_sources.py b/tests/api_resources/genai/knowledge_bases/test_data_sources.py new file mode 100644 index 00000000..b51c7e0c --- /dev/null +++ b/tests/api_resources/genai/knowledge_bases/test_data_sources.py @@ -0,0 +1,374 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
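Every test module in this patch resolves its target from TEST_API_BASE_URL, falling back to a local mock server on 127.0.0.1:4010. A sketch of pointing a client at that mock; the constructor parameters are assumptions consistent with the generated client, not shown in this hunk:

import os

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

# Hypothetical construction: parameter names assumed, not confirmed by this hunk.
client = DigitaloceanGenaiSDK(base_url=base_url, api_key="test-api-key")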
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.knowledge_bases import ( + DataSourceListResponse, + DataSourceCreateResponse, + DataSourceDeleteResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDataSources: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + data_source = client.genai.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + data_source = client.genai.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, + body_knowledge_base_uuid="knowledge_base_uuid", + spaces_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + web_crawler_data_source={ + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.data_sources.with_streaming_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" + ): + client.genai.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + data_source = client.genai.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + data_source = client.genai.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + page=0, 
+ per_page=0, + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.data_sources.with_streaming_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.genai.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + data_source = client.genai.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) + + +class TestAsyncDataSources: + parametrize = pytest.mark.parametrize("async_client", 
[False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + data_source = await async_client.genai.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + data_source = await async_client.genai.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, + body_knowledge_base_uuid="knowledge_base_uuid", + spaces_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + web_crawler_data_source={ + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" + ): + await async_client.genai.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + data_source = await async_client.genai.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + data_source = await async_client.genai.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + page=0, + per_page=0, + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.list( + 
knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.genai.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + data_source = await async_client.genai.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) diff --git a/tests/api_resources/genai/models/__init__.py b/tests/api_resources/genai/models/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ 
b/tests/api_resources/genai/models/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/genai/models/test_api_keys.py b/tests/api_resources/genai/models/test_api_keys.py new file mode 100644 index 00000000..afc9caa3 --- /dev/null +++ b/tests/api_resources/genai/models/test_api_keys.py @@ -0,0 +1,446 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.models import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, + APIKeyUpdateRegenerateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.genai.models.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.list( + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.models.api_keys.with_raw_response.delete( + "", + ) + + 
@pytest.mark.skip() + @parametrize + def test_method_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + api_key = client.genai.models.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.models.api_keys.with_raw_response.update_regenerate( + "", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.models.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.models.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.genai.models.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.list( + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.models.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response 
= await async_client.genai.models.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.models.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + api_key = await async_client.genai.models.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.models.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.models.api_keys.with_raw_response.update_regenerate( + "", + ) diff --git a/tests/api_resources/genai/providers/__init__.py b/tests/api_resources/genai/providers/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/genai/providers/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/genai/providers/anthropic/__init__.py b/tests/api_resources/genai/providers/anthropic/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/genai/providers/anthropic/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/genai/providers/anthropic/test_keys.py b/tests/api_resources/genai/providers/anthropic/test_keys.py new file mode 100644 index 00000000..f83ec64b --- /dev/null +++ b/tests/api_resources/genai/providers/anthropic/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.providers.anthropic import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() 
+ @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.genai.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, 
key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.genai.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/genai/providers/openai/__init__.py b/tests/api_resources/genai/providers/openai/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/genai/providers/openai/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/genai/providers/openai/test_keys.py b/tests/api_resources/genai/providers/openai/test_keys.py new file mode 100644 index 00000000..572292bc --- /dev/null +++ b/tests/api_resources/genai/providers/openai/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai.providers.openai import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyRetrieveAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: 
+ key = client.genai.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.genai.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.genai.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + key = client.genai.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + 
assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.genai.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, 
key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.genai.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + key = await async_client.genai.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) diff --git a/tests/api_resources/genai/test_agents.py b/tests/api_resources/genai/test_agents.py new file mode 100644 index 00000000..e7e57f99 --- /dev/null +++ b/tests/api_resources/genai/test_agents.py @@ -0,0 +1,597 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai import ( + AgentListResponse, + AgentCreateResponse, + AgentDeleteResponse, + AgentUpdateResponse, + AgentRetrieveResponse, + AgentUpdateStatusResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + 
client.genai.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.update( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.genai.agents.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.delete( + "uuid", + 
) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_status(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_status_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + agent = client.genai.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_status(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.genai.agents.with_raw_response.update_status( + path_uuid="", + ) + + +class TestAsyncAgents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await 
async_client.genai.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.update( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + 
retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.genai.agents.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.delete( + "uuid", + ) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + agent = await async_client.genai.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.genai.agents.with_raw_response.update_status( + path_uuid="", + ) diff --git a/tests/api_resources/genai/test_indexing_jobs.py b/tests/api_resources/genai/test_indexing_jobs.py new file mode 100644 index 00000000..89d9295b --- /dev/null +++ b/tests/api_resources/genai/test_indexing_jobs.py @@ -0,0 +1,446 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai import ( + IndexingJobListResponse, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobUpdateCancelResponse, + IndexingJobRetrieveDataSourcesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestIndexingJobs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.create() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.create( + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.indexing_jobs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.indexing_jobs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.retrieve( + "uuid", + ) + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.indexing_jobs.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.indexing_jobs.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + 
client.genai.indexing_jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.list() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.list( + page=0, + per_page=0, + ) + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.indexing_jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.indexing_jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.retrieve_data_sources( + "indexing_job_uuid", + ) + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + "indexing_job_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.indexing_jobs.with_streaming_response.retrieve_data_sources( + "indexing_job_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = client.genai.indexing_jobs.update_cancel( + path_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_cancel_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + indexing_job = 
client.genai.indexing_jobs.update_cancel( + path_uuid="uuid", + body_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.indexing_jobs.with_raw_response.update_cancel( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.indexing_jobs.with_streaming_response.update_cancel( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.genai.indexing_jobs.with_raw_response.update_cancel( + path_uuid="", + ) + + +class TestAsyncIndexingJobs: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.create() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.create( + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.indexing_jobs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.indexing_jobs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.retrieve( + "uuid", + ) + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.indexing_jobs.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.indexing_jobs.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.indexing_jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.list() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.list( + page=0, + per_page=0, + ) + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.indexing_jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.indexing_jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.retrieve_data_sources( + "indexing_job_uuid", + ) + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + "indexing_job_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + 
assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.indexing_jobs.with_streaming_response.retrieve_data_sources( + "indexing_job_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + await async_client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.update_cancel( + path_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_cancel_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + indexing_job = await async_client.genai.indexing_jobs.update_cancel( + path_uuid="uuid", + body_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.indexing_jobs.with_raw_response.update_cancel( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.indexing_jobs.with_streaming_response.update_cancel( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.genai.indexing_jobs.with_raw_response.update_cancel( + path_uuid="", + ) diff --git a/tests/api_resources/genai/test_knowledge_bases.py b/tests/api_resources/genai/test_knowledge_bases.py new file mode 100644 index 00000000..33e2a99b --- /dev/null +++ b/tests/api_resources/genai/test_knowledge_bases.py @@ -0,0 +1,510 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
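+#
+# Request payloads in these tests are plain dicts matching the generated
+# TypedDict params; nested data sources (spaces, file upload, web crawler)
+# are passed as literal dict payloads rather than constructed model objects.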
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai import ( + KnowledgeBaseListResponse, + KnowledgeBaseCreateResponse, + KnowledgeBaseDeleteResponse, + KnowledgeBaseUpdateResponse, + KnowledgeBaseRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKnowledgeBases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.create() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.create( + database_id="database_id", + datasources=[ + { + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", + "file_upload_data_source": { + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + "item_path": "item_path", + "spaces_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + "web_crawler_data_source": { + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + } + ], + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", + ) + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.genai.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.list() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.list( + page=0, + per_page=0, + ) + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = 
response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: + knowledge_base = client.genai.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.genai.knowledge_bases.with_raw_response.delete( + "", + ) + + +class TestAsyncKnowledgeBases: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.create() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.create( + database_id="database_id", + datasources=[ + { + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", + "file_upload_data_source": { + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + "item_path": "item_path", + "spaces_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + "web_crawler_data_source": { + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + } + ], + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", + ) + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.with_raw_response.update( + 
path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.genai.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.list() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.list( + page=0, + per_page=0, + ) + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + knowledge_base = await async_client.genai.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.genai.knowledge_bases.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/genai/test_models.py b/tests/api_resources/genai/test_models.py new file mode 100644 index 00000000..a5e9091e --- /dev/null +++ b/tests/api_resources/genai/test_models.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types.genai import ModelListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + model = client.genai.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + model = client.genai.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModels: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + model = await async_client.genai.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + model = await async_client.genai.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + 
assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/test_chat.py similarity index 65% rename from tests/api_resources/chat/test_completions.py rename to tests/api_resources/test_chat.py index 9c7ff505..0bf48414 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/test_chat.py @@ -9,18 +9,18 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.chat import CreateResponse +from digitalocean_genai_sdk.types import ChatCreateCompletionResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestCompletions: +class TestChat: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.create( + def test_method_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + chat = client.chat.create_completion( messages=[ { "content": "string", @@ -29,12 +29,12 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: ], model="llama3-8b-instruct", ) - assert_matches_type(CreateResponse, completion, path=["response"]) + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - completion = client.chat.completions.create( + def test_method_create_completion_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + chat = client.chat.create_completion( messages=[ { "content": "string", @@ -58,12 +58,12 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No top_p=1, user="user-1234", ) - assert_matches_type(CreateResponse, completion, path=["response"]) + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.chat.completions.with_raw_response.create( + def test_raw_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + response = client.chat.with_raw_response.create_completion( messages=[ { "content": "string", @@ -75,13 +75,13 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = 
response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) + chat = response.parse() + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.chat.completions.with_streaming_response.create( + def test_streaming_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + with client.chat.with_streaming_response.create_completion( messages=[ { "content": "string", @@ -93,19 +93,19 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) + chat = response.parse() + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) assert cast(Any, response.is_closed) is True -class TestAsyncCompletions: +class TestAsyncChat: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.create( + async def test_method_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + chat = await async_client.chat.create_completion( messages=[ { "content": "string", @@ -114,12 +114,12 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N ], model="llama3-8b-instruct", ) - assert_matches_type(CreateResponse, completion, path=["response"]) + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - completion = await async_client.chat.completions.create( + async def test_method_create_completion_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + chat = await async_client.chat.create_completion( messages=[ { "content": "string", @@ -143,12 +143,12 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce top_p=1, user="user-1234", ) - assert_matches_type(CreateResponse, completion, path=["response"]) + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.chat.completions.with_raw_response.create( + async def test_raw_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.chat.with_raw_response.create_completion( messages=[ { "content": "string", @@ -160,13 +160,13 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) + chat = await response.parse() + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.chat.completions.with_streaming_response.create( + async def 
test_streaming_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.chat.with_streaming_response.create_completion( messages=[ { "content": "string", @@ -178,7 +178,7 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - completion = await response.parse() - assert_matches_type(CreateResponse, completion, path=["response"]) + chat = await response.parse() + assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_genai.py b/tests/api_resources/test_genai.py new file mode 100644 index 00000000..6a6c5d93 --- /dev/null +++ b/tests/api_resources/test_genai.py @@ -0,0 +1,96 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import GenaiRetrieveRegionsResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestGenai: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: + genai = client.genai.retrieve_regions() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_regions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + genai = client.genai.retrieve_regions( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: + response = client.genai.with_raw_response.retrieve_regions() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + genai = response.parse() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: + with client.genai.with_streaming_response.retrieve_regions() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + genai = response.parse() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncGenai: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + genai = await async_client.genai.retrieve_regions() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_regions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + genai = await async_client.genai.retrieve_regions( + serves_batch=True, + serves_inference=True, + ) + 
assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.genai.with_raw_response.retrieve_regions() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + genai = await response.parse() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.genai.with_streaming_response.retrieve_regions() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + genai = await response.parse() + assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/test_client.py b/tests/test_client.py index 7ac3aae1..72994fee 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,17 +23,20 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError from digitalocean_genai_sdk._types import Omit -from digitalocean_genai_sdk._utils import maybe_transform from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER -from digitalocean_genai_sdk._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from digitalocean_genai_sdk._exceptions import ( + APIStatusError, + APITimeoutError, + DigitaloceanGenaiSDKError, + APIResponseValidationError, +) from digitalocean_genai_sdk._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options, ) -from digitalocean_genai_sdk.types.chat.completion_create_params import CompletionCreateParams from .utils import update_env @@ -336,6 +339,16 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" + def test_validate_headers(self) -> None: + client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("Authorization") == f"Bearer {api_key}" + + with pytest.raises(DigitaloceanGenaiSDKError): + with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + _ = client2 + def test_default_query_option(self) -> None: client = DigitaloceanGenaiSDK( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} @@ -714,26 +727,11 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - 
self.client.post( - "/chat/completions", - body=cast( - object, - maybe_transform( - dict( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ), - CompletionCreateParams, - ), - ), + self.client.get( + "/v2/gen-ai/agents/uuid/versions", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) @@ -743,26 +741,11 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.post( - "/chat/completions", - body=cast( - object, - maybe_transform( - dict( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ), - CompletionCreateParams, - ), - ), + self.client.get( + "/v2/gen-ai/agents/uuid/versions", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) @@ -793,17 +776,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) + response = client.genai.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -825,17 +800,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - - response = client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - extra_headers={"x-stainless-retry-count": Omit()}, + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + + response = client.genai.agents.versions.with_raw_response.list( + uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -857,17 +825,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - - response = client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - extra_headers={"x-stainless-retry-count": "42"}, + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + + response = client.genai.agents.versions.with_raw_response.list( + uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" @@ -1179,6 +1140,16 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-foo") == "stainless" assert 
request.headers.get("x-stainless-lang") == "my-overriding-header" + def test_validate_headers(self) -> None: + client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + assert request.headers.get("Authorization") == f"Bearer {api_key}" + + with pytest.raises(DigitaloceanGenaiSDKError): + with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + _ = client2 + def test_default_query_option(self) -> None: client = AsyncDigitaloceanGenaiSDK( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} @@ -1561,26 +1532,11 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.post( - "/chat/completions", - body=cast( - object, - maybe_transform( - dict( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ), - CompletionCreateParams, - ), - ), + await self.client.get( + "/v2/gen-ai/agents/uuid/versions", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) @@ -1590,26 +1546,11 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: - respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.post( - "/chat/completions", - body=cast( - object, - maybe_transform( - dict( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ), - CompletionCreateParams, - ), - ), + await self.client.get( + "/v2/gen-ai/agents/uuid/versions", cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) @@ -1641,17 +1582,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) + response = await client.genai.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1674,17 +1607,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - 
respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - - response = await client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - extra_headers={"x-stainless-retry-count": Omit()}, + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + + response = await client.genai.agents.versions.with_raw_response.list( + uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -1707,17 +1633,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - - response = await client.chat.completions.with_raw_response.create( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - extra_headers={"x-stainless-retry-count": "42"}, + respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + + response = await client.genai.agents.versions.with_raw_response.list( + uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" From ee60635fa1f5ad6384a16a92ea08f582e531c8b7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 21:26:31 +0000 Subject: [PATCH 009/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 7 ++++++- src/digitalocean_genai_sdk/types/__init__.py | 1 + src/digitalocean_genai_sdk/types/genai/__init__.py | 1 - .../types/genai/agent_create_response.py | 2 +- .../types/genai/agent_delete_response.py | 2 +- .../types/genai/agent_retrieve_response.py | 2 +- .../types/genai/agent_update_response.py | 2 +- .../types/genai/agent_update_status_response.py | 2 +- .../genai/agents/api_link_knowledge_base_output.py | 2 +- .../genai/agents/child_agent_view_response.py | 2 +- .../types/genai/agents/function_create_response.py | 2 +- .../types/genai/agents/function_delete_response.py | 2 +- .../types/genai/agents/function_update_response.py | 2 +- .../genai/agents/knowledge_base_detach_response.py | 2 +- .../anthropic/key_list_agents_response.py | 2 +- .../openai/key_retrieve_agents_response.py | 2 +- .../types/shared/__init__.py | 3 +++ .../types/{genai => shared}/api_agent.py | 14 +++++++------- 19 files changed, 31 insertions(+), 23 deletions(-) create mode 100644 src/digitalocean_genai_sdk/types/shared/__init__.py rename src/digitalocean_genai_sdk/types/{genai => shared}/api_agent.py (93%) diff --git a/.stats.yml b/.stats.yml index ae35d0c6..de237cf4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: a81446c4c386cd3e02581510d751ebe5 +config_hash: a89ea264030d8153f2e22d24183f5ce8 diff --git a/api.md b/api.md index c65f46b7..f794ef17 100644 --- a/api.md +++ b/api.md @@ -1,3 +1,9 @@ +# Shared Types + +```python +from digitalocean_genai_sdk.types import APIAgent +``` + # Genai Types: @@ -16,7 +22,6 @@ Types: ```python from digitalocean_genai_sdk.types.genai import ( - APIAgent, APIDeploymentVisibility, APIModel, APIRetrievalMethod, diff 
--git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py index 342f0444..e6e8e393 100644 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations from .model import Model as Model +from .shared import APIAgent as APIAgent from .model_list_response import ModelListResponse as ModelListResponse from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse diff --git a/src/digitalocean_genai_sdk/types/genai/__init__.py b/src/digitalocean_genai_sdk/types/genai/__init__.py index 486ef2b0..e56e270e 100644 --- a/src/digitalocean_genai_sdk/types/genai/__init__.py +++ b/src/digitalocean_genai_sdk/types/genai/__init__.py @@ -2,7 +2,6 @@ from __future__ import annotations -from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement from .api_indexing_job import APIIndexingJob as APIIndexingJob diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py index fc987cf2..e90801a4 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py @@ -14,7 +14,7 @@ class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None -from .api_agent import APIAgent +from ..shared.api_agent import APIAgent if PYDANTIC_V2: AgentCreateResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py index aa2cb139..9d70eda9 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py @@ -14,7 +14,7 @@ class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None -from .api_agent import APIAgent +from ..shared.api_agent import APIAgent if PYDANTIC_V2: AgentDeleteResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py index 3c671f1c..18901e0e 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py @@ -14,7 +14,7 @@ class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None -from .api_agent import APIAgent +from ..shared.api_agent import APIAgent if PYDANTIC_V2: AgentRetrieveResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py index 35c64f79..bb3a6d2a 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py @@ -14,7 +14,7 @@ class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None -from .api_agent import APIAgent +from ..shared.api_agent import APIAgent if PYDANTIC_V2: AgentUpdateResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py index cfda9b73..5fba7f46 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py +++ 
b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py @@ -14,7 +14,7 @@ class AgentUpdateStatusResponse(BaseModel): agent: Optional["APIAgent"] = None -from .api_agent import APIAgent +from ..shared.api_agent import APIAgent if PYDANTIC_V2: AgentUpdateStatusResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py index 631ca75d..1815dd8e 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py @@ -14,7 +14,7 @@ class APILinkKnowledgeBaseOutput(BaseModel): agent: Optional["APIAgent"] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: APILinkKnowledgeBaseOutput.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py index 1768b9ab..87836779 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py @@ -14,7 +14,7 @@ class ChildAgentViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: ChildAgentViewResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py index fb58caeb..1b8cd625 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py @@ -14,7 +14,7 @@ class FunctionCreateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: FunctionCreateResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py index be5e1fda..2fde55fc 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py @@ -14,7 +14,7 @@ class FunctionDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: FunctionDeleteResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py index b9970518..16b9aea2 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py @@ -14,7 +14,7 @@ class FunctionUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: FunctionUpdateResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py index e6d0c7ad..de74a98b 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py +++ 
b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py @@ -14,7 +14,7 @@ class KnowledgeBaseDetachResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..api_agent import APIAgent +from ...shared.api_agent import APIAgent if PYDANTIC_V2: KnowledgeBaseDetachResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py index 4c2453f6..dd7424de 100644 --- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py @@ -20,7 +20,7 @@ class KeyListAgentsResponse(BaseModel): meta: Optional[APIMeta] = None -from ...api_agent import APIAgent +from ....shared.api_agent import APIAgent if PYDANTIC_V2: KeyListAgentsResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py index af1fdf37..5c259abb 100644 --- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py @@ -20,7 +20,7 @@ class KeyRetrieveAgentsResponse(BaseModel): meta: Optional[APIMeta] = None -from ...api_agent import APIAgent +from ....shared.api_agent import APIAgent if PYDANTIC_V2: KeyRetrieveAgentsResponse.model_rebuild() diff --git a/src/digitalocean_genai_sdk/types/shared/__init__.py b/src/digitalocean_genai_sdk/types/shared/__init__.py new file mode 100644 index 00000000..412e6a13 --- /dev/null +++ b/src/digitalocean_genai_sdk/types/shared/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .api_agent import APIAgent as APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/api_agent.py b/src/digitalocean_genai_sdk/types/shared/api_agent.py similarity index 93% rename from src/digitalocean_genai_sdk/types/genai/api_agent.py rename to src/digitalocean_genai_sdk/types/shared/api_agent.py index 8a235001..018198bd 100644 --- a/src/digitalocean_genai_sdk/types/genai/api_agent.py +++ b/src/digitalocean_genai_sdk/types/shared/api_agent.py @@ -8,13 +8,13 @@ from ..._compat import PYDANTIC_V2 from ..._models import BaseModel -from .api_model import APIModel -from .api_knowledge_base import APIKnowledgeBase -from .api_retrieval_method import APIRetrievalMethod -from .api_deployment_visibility import APIDeploymentVisibility -from .agents.api_agent_api_key_info import APIAgentAPIKeyInfo -from .providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo -from .providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ..genai.api_model import APIModel +from ..genai.api_knowledge_base import APIKnowledgeBase +from ..genai.api_retrieval_method import APIRetrievalMethod +from ..genai.api_deployment_visibility import APIDeploymentVisibility +from ..genai.agents.api_agent_api_key_info import APIAgentAPIKeyInfo +from ..genai.providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ..genai.providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = [ "APIAgent", From 682ae09e287f4147c8a82966e5b063501c7ab2ad Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 21:31:33 +0000 Subject: [PATCH 010/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index de237cf4..3505721d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: a89ea264030d8153f2e22d24183f5ce8 +config_hash: 69ff7af00780ef56932e2fcc4fd37584 From dd9eb9da756b0050af4c14f52827fe52b06d7f30 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 10:06:58 +0000 Subject: [PATCH 011/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 7 +- src/digitalocean_genai_sdk/_client.py | 224 ++++++++++++++---- src/digitalocean_genai_sdk/types/__init__.py | 1 - .../types/genai/__init__.py | 1 + .../types/genai/agent_create_response.py | 8 +- .../types/genai/agent_delete_response.py | 8 +- .../types/genai/agent_retrieve_response.py | 8 +- .../types/genai/agent_update_response.py | 8 +- .../genai/agent_update_status_response.py | 8 +- .../agents/api_link_knowledge_base_output.py | 8 +- .../genai/agents/child_agent_view_response.py | 8 +- .../genai/agents/function_create_response.py | 8 +- .../genai/agents/function_delete_response.py | 8 +- .../genai/agents/function_update_response.py | 8 +- .../agents/knowledge_base_detach_response.py | 8 +- .../types/{shared => genai}/api_agent.py | 37 +-- .../anthropic/key_list_agents_response.py | 8 +- .../openai/key_retrieve_agents_response.py | 8 +- .../types/shared/__init__.py | 3 - 20 files changed, 202 insertions(+), 177 deletions(-) rename src/digitalocean_genai_sdk/types/{shared => genai}/api_agent.py (81%) delete 
mode 100644 src/digitalocean_genai_sdk/types/shared/__init__.py diff --git a/.stats.yml b/.stats.yml index 3505721d..8cfc80f5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 69ff7af00780ef56932e2fcc4fd37584 +config_hash: 43595338207b5728be143c6184c8285e diff --git a/api.md b/api.md index f794ef17..c65f46b7 100644 --- a/api.md +++ b/api.md @@ -1,9 +1,3 @@ -# Shared Types - -```python -from digitalocean_genai_sdk.types import APIAgent -``` - # Genai Types: @@ -22,6 +16,7 @@ Types: ```python from digitalocean_genai_sdk.types.genai import ( + APIAgent, APIDeploymentVisibility, APIModel, APIRetrievalMethod, diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py index 3ce8b62f..cd3ecf4e 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/digitalocean_genai_sdk/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import Any, Union, Mapping +from typing import TYPE_CHECKING, Any, Union, Mapping from typing_extensions import Self, override import httpx @@ -20,8 +20,8 @@ RequestOptions, ) from ._utils import is_given, get_async_library +from ._compat import cached_property from ._version import __version__ -from .resources import chat, models, embeddings from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError from ._base_client import ( @@ -29,7 +29,13 @@ SyncAPIClient, AsyncAPIClient, ) -from .resources.genai import genai + +if TYPE_CHECKING: + from .resources import chat, genai, models, embeddings + from .resources.chat import ChatResource, AsyncChatResource + from .resources.models import ModelsResource, AsyncModelsResource + from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource + from .resources.genai.genai import GenaiResource, AsyncGenaiResource __all__ = [ "Timeout", @@ -44,13 +50,6 @@ class DigitaloceanGenaiSDK(SyncAPIClient): - genai: genai.GenaiResource - chat: chat.ChatResource - embeddings: embeddings.EmbeddingsResource - models: models.ModelsResource - with_raw_response: DigitaloceanGenaiSDKWithRawResponse - with_streaming_response: DigitaloceanGenaiSDKWithStreamedResponse - # client options api_key: str @@ -105,12 +104,37 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.genai = genai.GenaiResource(self) - self.chat = chat.ChatResource(self) - self.embeddings = embeddings.EmbeddingsResource(self) - self.models = models.ModelsResource(self) - self.with_raw_response = DigitaloceanGenaiSDKWithRawResponse(self) - self.with_streaming_response = DigitaloceanGenaiSDKWithStreamedResponse(self) + @cached_property + def genai(self) -> GenaiResource: + from .resources.genai import GenaiResource + + return GenaiResource(self) + + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + + @cached_property + def embeddings(self) -> EmbeddingsResource: + from .resources.embeddings import EmbeddingsResource + + return EmbeddingsResource(self) + + @cached_property + def models(self) -> ModelsResource: + from .resources.models import ModelsResource + + return ModelsResource(self) + + @cached_property + def with_raw_response(self) -> 
DigitaloceanGenaiSDKWithRawResponse: + return DigitaloceanGenaiSDKWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DigitaloceanGenaiSDKWithStreamedResponse: + return DigitaloceanGenaiSDKWithStreamedResponse(self) @property @override @@ -218,13 +242,6 @@ def _make_status_error( class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): - genai: genai.AsyncGenaiResource - chat: chat.AsyncChatResource - embeddings: embeddings.AsyncEmbeddingsResource - models: models.AsyncModelsResource - with_raw_response: AsyncDigitaloceanGenaiSDKWithRawResponse - with_streaming_response: AsyncDigitaloceanGenaiSDKWithStreamedResponse - # client options api_key: str @@ -279,12 +296,37 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.genai = genai.AsyncGenaiResource(self) - self.chat = chat.AsyncChatResource(self) - self.embeddings = embeddings.AsyncEmbeddingsResource(self) - self.models = models.AsyncModelsResource(self) - self.with_raw_response = AsyncDigitaloceanGenaiSDKWithRawResponse(self) - self.with_streaming_response = AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) + @cached_property + def genai(self) -> AsyncGenaiResource: + from .resources.genai import AsyncGenaiResource + + return AsyncGenaiResource(self) + + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + + @cached_property + def embeddings(self) -> AsyncEmbeddingsResource: + from .resources.embeddings import AsyncEmbeddingsResource + + return AsyncEmbeddingsResource(self) + + @cached_property + def models(self) -> AsyncModelsResource: + from .resources.models import AsyncModelsResource + + return AsyncModelsResource(self) + + @cached_property + def with_raw_response(self) -> AsyncDigitaloceanGenaiSDKWithRawResponse: + return AsyncDigitaloceanGenaiSDKWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDigitaloceanGenaiSDKWithStreamedResponse: + return AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) @property @override @@ -392,35 +434,127 @@ def _make_status_error( class DigitaloceanGenaiSDKWithRawResponse: + _client: DigitaloceanGenaiSDK + def __init__(self, client: DigitaloceanGenaiSDK) -> None: - self.genai = genai.GenaiResourceWithRawResponse(client.genai) - self.chat = chat.ChatResourceWithRawResponse(client.chat) - self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings) - self.models = models.ModelsResourceWithRawResponse(client.models) + self._client = client + + @cached_property + def genai(self) -> genai.GenaiResourceWithRawResponse: + from .resources.genai import GenaiResourceWithRawResponse + + return GenaiResourceWithRawResponse(self._client.genai) + + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: + from .resources.embeddings import EmbeddingsResourceWithRawResponse + + return EmbeddingsResourceWithRawResponse(self._client.embeddings) + + @cached_property + def models(self) -> models.ModelsResourceWithRawResponse: + from .resources.models import ModelsResourceWithRawResponse + + return ModelsResourceWithRawResponse(self._client.models) class AsyncDigitaloceanGenaiSDKWithRawResponse: + _client: AsyncDigitaloceanGenaiSDK + def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: - 
self.genai = genai.AsyncGenaiResourceWithRawResponse(client.genai) - self.chat = chat.AsyncChatResourceWithRawResponse(client.chat) - self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings) - self.models = models.AsyncModelsResourceWithRawResponse(client.models) + self._client = client + + @cached_property + def genai(self) -> genai.AsyncGenaiResourceWithRawResponse: + from .resources.genai import AsyncGenaiResourceWithRawResponse + + return AsyncGenaiResourceWithRawResponse(self._client.genai) + + @cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: + from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse + + return AsyncEmbeddingsResourceWithRawResponse(self._client.embeddings) + + @cached_property + def models(self) -> models.AsyncModelsResourceWithRawResponse: + from .resources.models import AsyncModelsResourceWithRawResponse + + return AsyncModelsResourceWithRawResponse(self._client.models) class DigitaloceanGenaiSDKWithStreamedResponse: + _client: DigitaloceanGenaiSDK + def __init__(self, client: DigitaloceanGenaiSDK) -> None: - self.genai = genai.GenaiResourceWithStreamingResponse(client.genai) - self.chat = chat.ChatResourceWithStreamingResponse(client.chat) - self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings) - self.models = models.ModelsResourceWithStreamingResponse(client.models) + self._client = client + + @cached_property + def genai(self) -> genai.GenaiResourceWithStreamingResponse: + from .resources.genai import GenaiResourceWithStreamingResponse + + return GenaiResourceWithStreamingResponse(self._client.genai) + + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: + from .resources.embeddings import EmbeddingsResourceWithStreamingResponse + + return EmbeddingsResourceWithStreamingResponse(self._client.embeddings) + + @cached_property + def models(self) -> models.ModelsResourceWithStreamingResponse: + from .resources.models import ModelsResourceWithStreamingResponse + + return ModelsResourceWithStreamingResponse(self._client.models) class AsyncDigitaloceanGenaiSDKWithStreamedResponse: + _client: AsyncDigitaloceanGenaiSDK + def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: - self.genai = genai.AsyncGenaiResourceWithStreamingResponse(client.genai) - self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat) - self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings) - self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) + self._client = client + + @cached_property + def genai(self) -> genai.AsyncGenaiResourceWithStreamingResponse: + from .resources.genai import AsyncGenaiResourceWithStreamingResponse + + return AsyncGenaiResourceWithStreamingResponse(self._client.genai) + + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + + @cached_property + def 
embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: + from .resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse + + return AsyncEmbeddingsResourceWithStreamingResponse(self._client.embeddings) + + @cached_property + def models(self) -> models.AsyncModelsResourceWithStreamingResponse: + from .resources.models import AsyncModelsResourceWithStreamingResponse + + return AsyncModelsResourceWithStreamingResponse(self._client.models) Client = DigitaloceanGenaiSDK diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py index e6e8e393..342f0444 100644 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -3,7 +3,6 @@ from __future__ import annotations from .model import Model as Model -from .shared import APIAgent as APIAgent from .model_list_response import ModelListResponse as ModelListResponse from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse diff --git a/src/digitalocean_genai_sdk/types/genai/__init__.py b/src/digitalocean_genai_sdk/types/genai/__init__.py index e56e270e..486ef2b0 100644 --- a/src/digitalocean_genai_sdk/types/genai/__init__.py +++ b/src/digitalocean_genai_sdk/types/genai/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement from .api_indexing_job import APIIndexingJob as APIIndexingJob diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py index e90801a4..414dc562 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_create_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ..._compat import PYDANTIC_V2 from ..._models import BaseModel __all__ = ["AgentCreateResponse"] @@ -14,9 +13,4 @@ class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..shared.api_agent import APIAgent - -if PYDANTIC_V2: - AgentCreateResponse.model_rebuild() -else: - AgentCreateResponse.update_forward_refs() # type: ignore +from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py index 9d70eda9..aeca20a9 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ..._compat import PYDANTIC_V2 from ..._models import BaseModel __all__ = ["AgentDeleteResponse"] @@ -14,9 +13,4 @@ class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..shared.api_agent import APIAgent - -if PYDANTIC_V2: - AgentDeleteResponse.model_rebuild() -else: - AgentDeleteResponse.update_forward_refs() # type: ignore +from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py index 18901e0e..7a544dbe 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ..._compat import PYDANTIC_V2 from 
..._models import BaseModel __all__ = ["AgentRetrieveResponse"] @@ -14,9 +13,4 @@ class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..shared.api_agent import APIAgent - -if PYDANTIC_V2: - AgentRetrieveResponse.model_rebuild() -else: - AgentRetrieveResponse.update_forward_refs() # type: ignore +from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py index bb3a6d2a..d626a69a 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ..._compat import PYDANTIC_V2 from ..._models import BaseModel __all__ = ["AgentUpdateResponse"] @@ -14,9 +13,4 @@ class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..shared.api_agent import APIAgent - -if PYDANTIC_V2: - AgentUpdateResponse.model_rebuild() -else: - AgentUpdateResponse.update_forward_refs() # type: ignore +from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py index 5fba7f46..39c3dd0d 100644 --- a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ..._compat import PYDANTIC_V2 from ..._models import BaseModel __all__ = ["AgentUpdateStatusResponse"] @@ -14,9 +13,4 @@ class AgentUpdateStatusResponse(BaseModel): agent: Optional["APIAgent"] = None -from ..shared.api_agent import APIAgent - -if PYDANTIC_V2: - AgentUpdateStatusResponse.model_rebuild() -else: - AgentUpdateStatusResponse.update_forward_refs() # type: ignore +from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py index 1815dd8e..b4ce4d94 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py @@ -4,7 +4,6 @@ from typing import Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["APILinkKnowledgeBaseOutput"] @@ -14,9 +13,4 @@ class APILinkKnowledgeBaseOutput(BaseModel): agent: Optional["APIAgent"] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - APILinkKnowledgeBaseOutput.model_rebuild() -else: - APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore +from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py index 87836779..4bbf7464 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py @@ -4,7 +4,6 @@ from typing import List, Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["ChildAgentViewResponse"] @@ -14,9 +13,4 @@ class ChildAgentViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - ChildAgentViewResponse.model_rebuild() -else: - ChildAgentViewResponse.update_forward_refs() # type: ignore +from 
..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py index 1b8cd625..d1bf4a0f 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["FunctionCreateResponse"] @@ -14,9 +13,4 @@ class FunctionCreateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - FunctionCreateResponse.model_rebuild() -else: - FunctionCreateResponse.update_forward_refs() # type: ignore +from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py index 2fde55fc..f39a4ba1 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["FunctionDeleteResponse"] @@ -14,9 +13,4 @@ class FunctionDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - FunctionDeleteResponse.model_rebuild() -else: - FunctionDeleteResponse.update_forward_refs() # type: ignore +from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py index 16b9aea2..a63fc100 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["FunctionUpdateResponse"] @@ -14,9 +13,4 @@ class FunctionUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - FunctionUpdateResponse.model_rebuild() -else: - FunctionUpdateResponse.update_forward_refs() # type: ignore +from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py index de74a98b..a3b7de4c 100644 --- a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py +++ b/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py @@ -4,7 +4,6 @@ from typing import Optional -from ...._compat import PYDANTIC_V2 from ...._models import BaseModel __all__ = ["KnowledgeBaseDetachResponse"] @@ -14,9 +13,4 @@ class KnowledgeBaseDetachResponse(BaseModel): agent: Optional["APIAgent"] = None -from ...shared.api_agent import APIAgent - -if PYDANTIC_V2: - KnowledgeBaseDetachResponse.model_rebuild() -else: - KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore +from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/shared/api_agent.py b/src/digitalocean_genai_sdk/types/genai/api_agent.py similarity index 81% rename from src/digitalocean_genai_sdk/types/shared/api_agent.py rename to 
src/digitalocean_genai_sdk/types/genai/api_agent.py index 018198bd..243b8d53 100644 --- a/src/digitalocean_genai_sdk/types/shared/api_agent.py +++ b/src/digitalocean_genai_sdk/types/genai/api_agent.py @@ -6,15 +6,14 @@ from datetime import datetime from typing_extensions import Literal -from ..._compat import PYDANTIC_V2 from ..._models import BaseModel -from ..genai.api_model import APIModel -from ..genai.api_knowledge_base import APIKnowledgeBase -from ..genai.api_retrieval_method import APIRetrievalMethod -from ..genai.api_deployment_visibility import APIDeploymentVisibility -from ..genai.agents.api_agent_api_key_info import APIAgentAPIKeyInfo -from ..genai.providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo -from ..genai.providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from .api_model import APIModel +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_deployment_visibility import APIDeploymentVisibility +from .agents.api_agent_api_key_info import APIAgentAPIKeyInfo +from .providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo +from .providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = [ "APIAgent", @@ -262,25 +261,3 @@ class APIAgent(BaseModel): user_id: Optional[str] = None uuid: Optional[str] = None - - -if PYDANTIC_V2: - APIAgent.model_rebuild() - APIKey.model_rebuild() - Chatbot.model_rebuild() - ChatbotIdentifier.model_rebuild() - Deployment.model_rebuild() - Function.model_rebuild() - Guardrail.model_rebuild() - Template.model_rebuild() - TemplateGuardrail.model_rebuild() -else: - APIAgent.update_forward_refs() # type: ignore - APIKey.update_forward_refs() # type: ignore - Chatbot.update_forward_refs() # type: ignore - ChatbotIdentifier.update_forward_refs() # type: ignore - Deployment.update_forward_refs() # type: ignore - Function.update_forward_refs() # type: ignore - Guardrail.update_forward_refs() # type: ignore - Template.update_forward_refs() # type: ignore - TemplateGuardrail.update_forward_refs() # type: ignore diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py index dd7424de..558aef6c 100644 --- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py +++ b/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py @@ -4,7 +4,6 @@ from typing import List, Optional -from ....._compat import PYDANTIC_V2 from ....._models import BaseModel from ...agents.api_meta import APIMeta from ...agents.api_links import APILinks @@ -20,9 +19,4 @@ class KeyListAgentsResponse(BaseModel): meta: Optional[APIMeta] = None -from ....shared.api_agent import APIAgent - -if PYDANTIC_V2: - KeyListAgentsResponse.model_rebuild() -else: - KeyListAgentsResponse.update_forward_refs() # type: ignore +from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py index 5c259abb..ee25321e 100644 --- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py +++ b/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py @@ -4,7 +4,6 @@ from typing import List, Optional -from ....._compat import PYDANTIC_V2 from ....._models import 
BaseModel from ...agents.api_meta import APIMeta from ...agents.api_links import APILinks @@ -20,9 +19,4 @@ class KeyRetrieveAgentsResponse(BaseModel): meta: Optional[APIMeta] = None -from ....shared.api_agent import APIAgent - -if PYDANTIC_V2: - KeyRetrieveAgentsResponse.model_rebuild() -else: - KeyRetrieveAgentsResponse.update_forward_refs() # type: ignore +from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/shared/__init__.py b/src/digitalocean_genai_sdk/types/shared/__init__.py deleted file mode 100644 index 412e6a13..00000000 --- a/src/digitalocean_genai_sdk/types/shared/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .api_agent import APIAgent as APIAgent From 856e94a447fa27908eb1ab293685f4cde2caf815 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 15:06:02 +0000 Subject: [PATCH 012/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 18 +- api.md | 200 ++++----- src/digitalocean_genai_sdk/_client.py | 273 ++++++++++++- .../resources/__init__.py | 110 ++++- .../resources/{genai => }/agents/__init__.py | 0 .../resources/{genai => }/agents/agents.py | 44 +- .../resources/{genai => }/agents/api_keys.py | 24 +- .../{genai => }/agents/child_agents.py | 22 +- .../resources/{genai => }/agents/functions.py | 20 +- .../{genai => }/agents/knowledge_bases.py | 14 +- .../resources/{genai => }/agents/versions.py | 18 +- .../{genai/models => api_keys}/__init__.py | 14 - .../models/models.py => api_keys/api_keys.py} | 115 +++--- .../api_keys.py => api_keys/api_keys_.py} | 24 +- .../resources/{genai => }/auth/__init__.py | 0 .../{genai => }/auth/agents/__init__.py | 0 .../{genai => }/auth/agents/agents.py | 4 +- .../{genai => }/auth/agents/token.py | 16 +- .../resources/{genai => }/auth/auth.py | 4 +- .../resources/genai/__init__.py | 103 ----- .../resources/genai/genai.py | 383 ------------------ .../resources/{genai => }/indexing_jobs.py | 24 +- .../{genai => }/knowledge_bases/__init__.py | 0 .../knowledge_bases/data_sources.py | 24 +- .../knowledge_bases/knowledge_bases.py | 24 +- .../{genai => }/providers/__init__.py | 0 .../providers/anthropic/__init__.py | 0 .../providers/anthropic/anthropic.py | 4 +- .../{genai => }/providers/anthropic/keys.py | 31 +- .../{genai => }/providers/openai/__init__.py | 0 .../{genai => }/providers/openai/keys.py | 31 +- .../{genai => }/providers/openai/openai.py | 4 +- .../{genai => }/providers/providers.py | 4 +- .../resources/regions.py | 191 +++++++++ src/digitalocean_genai_sdk/types/__init__.py | 42 +- .../types/{genai => }/agent_create_params.py | 2 +- .../{genai => }/agent_create_response.py | 2 +- .../{genai => }/agent_delete_response.py | 2 +- .../types/{genai => }/agent_list_params.py | 0 .../types/{genai => }/agent_list_response.py | 2 +- .../{genai => }/agent_retrieve_response.py | 2 +- .../types/{genai => }/agent_update_params.py | 2 +- .../{genai => }/agent_update_response.py | 2 +- .../{genai => }/agent_update_status_params.py | 2 +- .../agent_update_status_response.py | 2 +- .../types/{genai => }/agents/__init__.py | 0 .../agents/api_agent_api_key_info.py | 2 +- .../agents/api_key_create_params.py | 2 +- .../agents/api_key_create_response.py | 2 +- .../agents/api_key_delete_response.py | 2 +- .../{genai => }/agents/api_key_list_params.py | 0 .../agents/api_key_list_response.py | 2 +- .../agents/api_key_regenerate_response.py | 2 +- 
.../agents/api_key_update_params.py | 2 +- .../agents/api_key_update_response.py | 2 +- .../agents/api_link_knowledge_base_output.py | 2 +- .../types/{genai => }/agents/api_links.py | 2 +- .../types/{genai => }/agents/api_meta.py | 2 +- .../agents/child_agent_add_params.py | 2 +- .../agents/child_agent_add_response.py | 2 +- .../agents/child_agent_delete_response.py | 2 +- .../agents/child_agent_update_params.py | 2 +- .../agents/child_agent_update_response.py | 2 +- .../agents/child_agent_view_response.py | 2 +- .../agents/function_create_params.py | 2 +- .../agents/function_create_response.py | 2 +- .../agents/function_delete_response.py | 2 +- .../agents/function_update_params.py | 2 +- .../agents/function_update_response.py | 2 +- .../agents/knowledge_base_detach_response.py | 2 +- .../{genai => }/agents/version_list_params.py | 0 .../agents/version_list_response.py | 2 +- .../agents/version_update_params.py | 2 +- .../agents/version_update_response.py | 2 +- .../types/{genai => }/api_agent.py | 2 +- .../types/{genai => }/api_agreement.py | 2 +- .../{genai => }/api_deployment_visibility.py | 0 .../types/{genai => }/api_indexing_job.py | 2 +- ..._list_params.py => api_key_list_params.py} | 4 +- ...t_response.py => api_key_list_response.py} | 6 +- .../{genai/models => api_keys}/__init__.py | 0 .../api_key_create_params.py | 0 .../api_key_create_response.py | 2 +- .../api_key_delete_response.py | 2 +- .../api_key_list_params.py | 0 .../api_key_list_response.py | 2 +- .../api_key_update_params.py | 2 +- .../api_key_update_regenerate_response.py | 2 +- .../api_key_update_response.py | 2 +- .../api_model_api_key_info.py | 2 +- .../types/{genai => }/api_knowledge_base.py | 2 +- .../types/{genai => }/api_model.py | 2 +- .../types/{genai => }/api_model_version.py | 2 +- .../types/{genai => }/api_retrieval_method.py | 0 .../types/{genai => }/auth/__init__.py | 0 .../types/{genai => }/auth/agents/__init__.py | 0 .../auth/agents/token_create_params.py | 2 +- .../auth/agents/token_create_response.py | 2 +- .../types/genai/__init__.py | 42 -- .../{genai => }/indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../{genai => }/indexing_job_list_params.py | 0 .../{genai => }/indexing_job_list_response.py | 2 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 +- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../knowledge_base_create_params.py | 0 .../knowledge_base_create_response.py | 2 +- .../knowledge_base_delete_response.py | 2 +- .../{genai => }/knowledge_base_list_params.py | 0 .../knowledge_base_list_response.py | 2 +- .../knowledge_base_retrieve_response.py | 2 +- .../knowledge_base_update_params.py | 2 +- .../knowledge_base_update_response.py | 2 +- .../{genai => }/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 2 +- .../api_file_upload_data_source_param.py | 0 .../api_knowledge_base_data_source.py | 2 +- .../knowledge_bases/api_spaces_data_source.py | 2 +- .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 2 +- .../api_web_crawler_data_source_param.py | 0 .../data_source_create_params.py | 2 +- .../data_source_create_response.py | 2 +- .../data_source_delete_response.py | 2 +- .../data_source_list_params.py | 0 .../data_source_list_response.py | 2 +- .../types/{genai => }/providers/__init__.py | 0 .../providers/anthropic/__init__.py | 0 .../anthropic/api_anthropic_api_key_info.py | 2 +- 
.../providers/anthropic/key_create_params.py | 0 .../anthropic/key_create_response.py | 2 +- .../anthropic/key_delete_response.py | 2 +- .../anthropic/key_list_agents_params.py | 0 .../anthropic/key_list_agents_response.py | 2 +- .../providers/anthropic/key_list_params.py | 0 .../providers/anthropic/key_list_response.py | 2 +- .../anthropic/key_retrieve_response.py | 2 +- .../providers/anthropic/key_update_params.py | 2 +- .../anthropic/key_update_response.py | 2 +- .../{genai => }/providers/openai/__init__.py | 0 .../openai/api_openai_api_key_info.py | 2 +- .../providers/openai/key_create_params.py | 0 .../providers/openai/key_create_response.py | 2 +- .../providers/openai/key_delete_response.py | 2 +- .../providers/openai/key_list_params.py | 0 .../providers/openai/key_list_response.py | 2 +- .../openai/key_retrieve_agents_params.py | 0 .../openai/key_retrieve_agents_response.py | 2 +- .../providers/openai/key_retrieve_response.py | 2 +- .../providers/openai/key_update_params.py | 2 +- .../providers/openai/key_update_response.py | 2 +- ...egions_params.py => region_list_params.py} | 4 +- ...ns_response.py => region_list_response.py} | 4 +- .../{genai => agents}/__init__.py | 0 .../{genai => }/agents/test_api_keys.py | 106 ++--- .../{genai => }/agents/test_child_agents.py | 86 ++-- .../{genai => }/agents/test_functions.py | 66 +-- .../agents/test_knowledge_bases.py | 58 +-- .../{genai => }/agents/test_versions.py | 42 +- .../{genai/agents => api_keys}/__init__.py | 0 .../test_api_keys_.py} | 86 ++-- .../{genai => }/auth/__init__.py | 0 .../{genai => }/auth/agents/__init__.py | 0 .../{genai => }/auth/agents/test_token.py | 22 +- .../genai/providers/openai/__init__.py | 1 - .../{genai => }/knowledge_bases/__init__.py | 0 .../knowledge_bases/test_data_sources.py | 62 +-- .../{genai/models => providers}/__init__.py | 0 .../anthropic}/__init__.py | 0 .../providers/anthropic/test_keys.py | 106 ++--- .../openai}/__init__.py | 0 .../{genai => }/providers/openai/test_keys.py | 106 ++--- .../api_resources/{genai => }/test_agents.py | 106 ++--- .../test_models.py => test_api_keys.py} | 46 +-- tests/api_resources/test_genai.py | 96 ----- .../{genai => }/test_indexing_jobs.py | 86 ++-- .../{genai => }/test_knowledge_bases.py | 86 ++-- tests/api_resources/test_regions.py | 96 +++++ tests/test_client.py | 12 +- 182 files changed, 1637 insertions(+), 1651 deletions(-) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/agents.py (97%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/api_keys.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/child_agents.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/functions.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/knowledge_bases.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/agents/versions.py (95%) rename src/digitalocean_genai_sdk/resources/{genai/models => api_keys}/__init__.py (55%) rename src/digitalocean_genai_sdk/resources/{genai/models/models.py => api_keys/api_keys.py} (72%) rename src/digitalocean_genai_sdk/resources/{genai/models/api_keys.py => api_keys/api_keys_.py} (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/auth/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/auth/agents/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/auth/agents/agents.py (97%) rename 
src/digitalocean_genai_sdk/resources/{genai => }/auth/agents/token.py (93%) rename src/digitalocean_genai_sdk/resources/{genai => }/auth/auth.py (97%) delete mode 100644 src/digitalocean_genai_sdk/resources/genai/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/genai/genai.py rename src/digitalocean_genai_sdk/resources/{genai => }/indexing_jobs.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/knowledge_bases/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/knowledge_bases/data_sources.py (95%) rename src/digitalocean_genai_sdk/resources/{genai => }/knowledge_bases/knowledge_bases.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/anthropic/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/anthropic/anthropic.py (97%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/anthropic/keys.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/openai/__init__.py (100%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/openai/keys.py (96%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/openai/openai.py (97%) rename src/digitalocean_genai_sdk/resources/{genai => }/providers/providers.py (97%) create mode 100644 src/digitalocean_genai_sdk/resources/regions.py rename src/digitalocean_genai_sdk/types/{genai => }/agent_create_params.py (96%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_create_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_delete_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_list_response.py (99%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_retrieve_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_update_params.py (98%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_update_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_update_status_params.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/agent_update_status_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_agent_api_key_info.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_create_params.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_create_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_list_response.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_regenerate_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_update_params.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_key_update_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_link_knowledge_base_output.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_links.py (91%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/api_meta.py (88%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_add_params.py (94%) rename 
src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_add_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_update_params.py (94%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_update_response.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/child_agent_view_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/function_create_params.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/function_create_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/function_delete_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/function_update_params.py (94%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/function_update_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/knowledge_base_detach_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/version_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/version_list_response.py (98%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/version_update_params.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/agents/version_update_response.py (95%) rename src/digitalocean_genai_sdk/types/{genai => }/api_agent.py (99%) rename src/digitalocean_genai_sdk/types/{genai => }/api_agreement.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/api_deployment_visibility.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/api_indexing_job.py (96%) rename src/digitalocean_genai_sdk/types/{genai/model_list_params.py => api_key_list_params.py} (94%) rename src/digitalocean_genai_sdk/types/{genai/model_list_response.py => api_key_list_response.py} (88%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_create_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_create_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_list_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_update_params.py (90%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_update_regenerate_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_key_update_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai/models => api_keys}/api_model_api_key_info.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/api_knowledge_base.py (95%) rename src/digitalocean_genai_sdk/types/{genai => }/api_model.py (97%) rename src/digitalocean_genai_sdk/types/{genai => }/api_model_version.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/api_retrieval_method.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/auth/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/auth/agents/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/auth/agents/token_create_params.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/auth/agents/token_create_response.py (88%) 
delete mode 100644 src/digitalocean_genai_sdk/types/genai/__init__.py rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_create_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_create_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_list_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_retrieve_data_sources_response.py (97%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_retrieve_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_update_cancel_params.py (91%) rename src/digitalocean_genai_sdk/types/{genai => }/indexing_job_update_cancel_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_create_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_create_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_delete_response.py (87%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_list_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_retrieve_response.py (95%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_update_params.py (94%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_base_update_response.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_file_upload_data_source.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_file_upload_data_source_param.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_knowledge_base_data_source.py (96%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_spaces_data_source.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_spaces_data_source_param.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_web_crawler_data_source.py (96%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/api_web_crawler_data_source_param.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/data_source_create_params.py (95%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/data_source_create_response.py (91%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/data_source_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/data_source_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/knowledge_bases/data_source_list_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/api_anthropic_api_key_info.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_create_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_create_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => 
}/providers/anthropic/key_list_agents_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_list_agents_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_list_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_retrieve_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_update_params.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/anthropic/key_update_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/__init__.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/api_openai_api_key_info.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_create_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_create_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_delete_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_list_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_list_response.py (92%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_retrieve_agents_params.py (100%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_retrieve_agents_response.py (93%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_retrieve_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_update_params.py (90%) rename src/digitalocean_genai_sdk/types/{genai => }/providers/openai/key_update_response.py (89%) rename src/digitalocean_genai_sdk/types/{genai_retrieve_regions_params.py => region_list_params.py} (77%) rename src/digitalocean_genai_sdk/types/{genai_retrieve_regions_response.py => region_list_response.py} (81%) rename tests/api_resources/{genai => agents}/__init__.py (100%) rename tests/api_resources/{genai => }/agents/test_api_keys.py (83%) rename tests/api_resources/{genai => }/agents/test_child_agents.py (84%) rename tests/api_resources/{genai => }/agents/test_functions.py (85%) rename tests/api_resources/{genai => }/agents/test_knowledge_bases.py (82%) rename tests/api_resources/{genai => }/agents/test_versions.py (83%) rename tests/api_resources/{genai/agents => api_keys}/__init__.py (100%) rename tests/api_resources/{genai/models/test_api_keys.py => api_keys/test_api_keys_.py} (82%) rename tests/api_resources/{genai => }/auth/__init__.py (100%) rename tests/api_resources/{genai => }/auth/agents/__init__.py (100%) rename tests/api_resources/{genai => }/auth/agents/test_token.py (83%) delete mode 100644 tests/api_resources/genai/providers/openai/__init__.py rename tests/api_resources/{genai => }/knowledge_bases/__init__.py (100%) rename tests/api_resources/{genai => }/knowledge_bases/test_data_sources.py (83%) rename tests/api_resources/{genai/models => providers}/__init__.py (100%) rename tests/api_resources/{genai/providers => providers/anthropic}/__init__.py (100%) rename tests/api_resources/{genai => }/providers/anthropic/test_keys.py (81%) rename tests/api_resources/{genai/providers/anthropic => providers/openai}/__init__.py (100%) rename tests/api_resources/{genai => }/providers/openai/test_keys.py (81%) rename tests/api_resources/{genai => }/test_agents.py (85%) 
rename tests/api_resources/{genai/test_models.py => test_api_keys.py} (64%) delete mode 100644 tests/api_resources/test_genai.py rename tests/api_resources/{genai => }/test_indexing_jobs.py (82%) rename tests/api_resources/{genai => }/test_knowledge_bases.py (84%) create mode 100644 tests/api_resources/test_regions.py diff --git a/.stats.yml b/.stats.yml index 8cfc80f5..0c4b9912 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 43595338207b5728be143c6184c8285e +config_hash: 25c073783b334ca5170ad34fa03ed57f diff --git a/README.md b/README.md index 8ea43f55..81df1124 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ client = DigitaloceanGenaiSDK( ), # This is the default and can be omitted ) -versions = client.genai.agents.versions.list( +versions = client.agents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -64,7 +64,7 @@ client = AsyncDigitaloceanGenaiSDK( async def main() -> None: - versions = await client.genai.agents.versions.list( + versions = await client.agents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -93,7 +93,7 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -data_source = client.genai.knowledge_bases.data_sources.create( +data_source = client.knowledge_bases.data_sources.create( path_knowledge_base_uuid="knowledge_base_uuid", aws_data_source={}, ) @@ -116,7 +116,7 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() try: - client.genai.agents.versions.list( + client.agents.versions.list( uuid="REPLACE_ME", ) except digitalocean_genai_sdk.APIConnectionError as e: @@ -161,7 +161,7 @@ client = DigitaloceanGenaiSDK( ) # Or, configure per-request: -client.with_options(max_retries=5).genai.agents.versions.list( +client.with_options(max_retries=5).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -186,7 +186,7 @@ client = DigitaloceanGenaiSDK( ) # Override per-request: -client.with_options(timeout=5.0).genai.agents.versions.list( +client.with_options(timeout=5.0).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -229,12 +229,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from digitalocean_genai_sdk import DigitaloceanGenaiSDK client = DigitaloceanGenaiSDK() -response = client.genai.agents.versions.with_raw_response.list( +response = client.agents.versions.with_raw_response.list( uuid="REPLACE_ME", ) print(response.headers.get('X-My-Header')) -version = response.parse() # get the object that `genai.agents.versions.list()` would have returned +version = response.parse() # get the object that `agents.versions.list()` would have returned print(version.agent_versions) ``` @@ -249,7 +249,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
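For illustration, here is a minimal async sketch of the same streaming interface. It assumes the flattened `client.agents.versions` path this patch introduces and uses `REPLACE_ME` as a placeholder UUID, as the surrounding examples do:

```python
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK

client = AsyncDigitaloceanGenaiSDK()


async def main() -> None:
    # In the async client the body-reading helpers (`.read()`, `.text()`,
    # `.json()`, `.parse()`, ...) are coroutines and must be awaited.
    async with client.agents.versions.with_streaming_response.list(
        uuid="REPLACE_ME",  # placeholder, as in the examples above
    ) as response:
        print(response.headers.get("X-My-Header"))
        body = await response.read()  # the body is only read at this point
        print(len(body))


asyncio.run(main())
```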
```python -with client.genai.agents.versions.with_streaming_response.list( +with client.agents.versions.with_streaming_response.list( uuid="REPLACE_ME", ) as response: print(response.headers.get("X-My-Header")) diff --git a/api.md b/api.md index c65f46b7..5457215d 100644 --- a/api.md +++ b/api.md @@ -1,21 +1,9 @@ -# Genai +# Agents Types: ```python -from digitalocean_genai_sdk.types import GenaiRetrieveRegionsResponse -``` - -Methods: - -- client.genai.retrieve_regions(\*\*params) -> GenaiRetrieveRegionsResponse - -## Agents - -Types: - -```python -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import ( APIAgent, APIDeploymentVisibility, APIModel, @@ -31,19 +19,19 @@ from digitalocean_genai_sdk.types.genai import ( Methods: -- client.genai.agents.create(\*\*params) -> AgentCreateResponse -- client.genai.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.genai.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.genai.agents.list(\*\*params) -> AgentListResponse -- client.genai.agents.delete(uuid) -> AgentDeleteResponse -- client.genai.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse -### APIKeys +## APIKeys Types: ```python -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( APIAgentAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -55,18 +43,18 @@ from digitalocean_genai_sdk.types.genai.agents import ( Methods: -- client.genai.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.genai.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.genai.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.genai.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.genai.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse -### Functions +## Functions Types: ```python -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -75,16 +63,16 @@ from digitalocean_genai_sdk.types.genai.agents import ( Methods: -- client.genai.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.genai.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.genai.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- 
client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse -### Versions +## Versions Types: ```python -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( APILinks, APIMeta, VersionUpdateResponse, @@ -94,15 +82,15 @@ from digitalocean_genai_sdk.types.genai.agents import ( Methods: -- client.genai.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.genai.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse -### KnowledgeBases +## KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse, ) @@ -110,16 +98,16 @@ from digitalocean_genai_sdk.types.genai.agents import ( Methods: -- client.genai.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.genai.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.genai.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse -### ChildAgents +## ChildAgents Types: ```python -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( ChildAgentUpdateResponse, ChildAgentDeleteResponse, ChildAgentAddResponse, @@ -129,21 +117,21 @@ from digitalocean_genai_sdk.types.genai.agents import ( Methods: -- client.genai.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.genai.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.genai.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.genai.agents.child_agents.view(uuid) -> ChildAgentViewResponse +- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse -## Providers +# Providers -### Anthropic +## Anthropic -#### Keys +### Keys Types: ```python -from digitalocean_genai_sdk.types.genai.providers.anthropic import ( +from digitalocean_genai_sdk.types.providers.anthropic import ( APIAnthropicAPIKeyInfo, KeyCreateResponse, KeyRetrieveResponse, @@ -156,21 +144,21 @@ from digitalocean_genai_sdk.types.genai.providers.anthropic import ( Methods: -- client.genai.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.genai.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.genai.providers.anthropic.keys.update(path_api_key_uuid, 
\*\*params) -> KeyUpdateResponse -- client.genai.providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.genai.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.genai.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse +- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse -### OpenAI +## OpenAI -#### Keys +### Keys Types: ```python -from digitalocean_genai_sdk.types.genai.providers.openai import ( +from digitalocean_genai_sdk.types.providers.openai import ( APIOpenAIAPIKeyInfo, KeyCreateResponse, KeyRetrieveResponse, @@ -183,35 +171,47 @@ from digitalocean_genai_sdk.types.genai.providers.openai import ( Methods: -- client.genai.providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- client.genai.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.genai.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.genai.providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.genai.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.genai.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse +- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse -## Auth +# Auth -### Agents +## Agents -#### Token +### Token Types: ```python -from digitalocean_genai_sdk.types.genai.auth.agents import TokenCreateResponse +from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse ``` Methods: -- client.genai.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse +- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse -## IndexingJobs +# Regions Types: ```python -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import RegionListResponse +``` + +Methods: + +- client.regions.list(\*\*params) -> RegionListResponse + +# IndexingJobs + +Types: + +```python +from digitalocean_genai_sdk.types import ( APIIndexingJob, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -223,18 +223,18 @@ from digitalocean_genai_sdk.types.genai import ( Methods: -- client.genai.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.genai.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.genai.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.genai.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.genai.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- 
client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse -## KnowledgeBases +# KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -246,18 +246,18 @@ from digitalocean_genai_sdk.types.genai import ( Methods: -- client.genai.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.genai.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.genai.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.genai.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.genai.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse -### DataSources +## DataSources Types: ```python -from digitalocean_genai_sdk.types.genai.knowledge_bases import ( +from digitalocean_genai_sdk.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -270,28 +270,28 @@ from digitalocean_genai_sdk.types.genai.knowledge_bases import ( Methods: -- client.genai.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.genai.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.genai.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse -## Models +# APIKeys Types: ```python -from digitalocean_genai_sdk.types.genai import APIAgreement, APIModelVersion, ModelListResponse +from digitalocean_genai_sdk.types import APIAgreement, APIModelVersion, APIKeyListResponse ``` Methods: -- client.genai.models.list(\*\*params) -> ModelListResponse +- client.api_keys.list(\*\*params) -> APIKeyListResponse -### APIKeys +## APIKeys Types: ```python -from digitalocean_genai_sdk.types.genai.models import ( +from digitalocean_genai_sdk.types.api_keys import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -303,11 +303,11 @@ from digitalocean_genai_sdk.types.genai.models import ( Methods: -- client.genai.models.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.genai.models.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.genai.models.api_keys.list(\*\*params) -> APIKeyListResponse -- client.genai.models.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.genai.models.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- 
client.api_keys.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.api_keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.api_keys.api_keys.list(\*\*params) -> APIKeyListResponse +- client.api_keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.api_keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Chat diff --git a/src/digitalocean_genai_sdk/_client.py b/src/digitalocean_genai_sdk/_client.py index cd3ecf4e..2f86bb7d 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/digitalocean_genai_sdk/_client.py @@ -31,11 +31,28 @@ ) if TYPE_CHECKING: - from .resources import chat, genai, models, embeddings + from .resources import ( + auth, + chat, + agents, + models, + regions, + api_keys, + providers, + embeddings, + indexing_jobs, + knowledge_bases, + ) from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource + from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.auth.auth import AuthResource, AsyncAuthResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource - from .resources.genai.genai import GenaiResource, AsyncGenaiResource + from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource + from .resources.providers.providers import ProvidersResource, AsyncProvidersResource + from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ "Timeout", @@ -105,10 +122,46 @@ def __init__( ) @cached_property - def genai(self) -> GenaiResource: - from .resources.genai import GenaiResource + def agents(self) -> AgentsResource: + from .resources.agents import AgentsResource - return GenaiResource(self) + return AgentsResource(self) + + @cached_property + def providers(self) -> ProvidersResource: + from .resources.providers import ProvidersResource + + return ProvidersResource(self) + + @cached_property + def auth(self) -> AuthResource: + from .resources.auth import AuthResource + + return AuthResource(self) + + @cached_property + def regions(self) -> RegionsResource: + from .resources.regions import RegionsResource + + return RegionsResource(self) + + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + from .resources.indexing_jobs import IndexingJobsResource + + return IndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + from .resources.knowledge_bases import KnowledgeBasesResource + + return KnowledgeBasesResource(self) + + @cached_property + def api_keys(self) -> APIKeysResource: + from .resources.api_keys import APIKeysResource + + return APIKeysResource(self) @cached_property def chat(self) -> ChatResource: @@ -297,10 +350,46 @@ def __init__( ) @cached_property - def genai(self) -> AsyncGenaiResource: - from .resources.genai import AsyncGenaiResource + def agents(self) -> AsyncAgentsResource: + from .resources.agents import AsyncAgentsResource + + return AsyncAgentsResource(self) + + @cached_property + def providers(self) -> AsyncProvidersResource: + from .resources.providers import AsyncProvidersResource + + return AsyncProvidersResource(self) + + @cached_property + def auth(self) -> AsyncAuthResource: + from .resources.auth import AsyncAuthResource + + return
AsyncAuthResource(self) + + @cached_property + def regions(self) -> AsyncRegionsResource: + from .resources.regions import AsyncRegionsResource - return AsyncGenaiResource(self) + return AsyncRegionsResource(self) + + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + from .resources.indexing_jobs import AsyncIndexingJobsResource + + return AsyncIndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + from .resources.knowledge_bases import AsyncKnowledgeBasesResource + + return AsyncKnowledgeBasesResource(self) + + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + from .resources.api_keys import AsyncAPIKeysResource + + return AsyncAPIKeysResource(self) @cached_property def chat(self) -> AsyncChatResource: @@ -440,10 +529,46 @@ def __init__(self, client: DigitaloceanGenaiSDK) -> None: self._client = client @cached_property - def genai(self) -> genai.GenaiResourceWithRawResponse: - from .resources.genai import GenaiResourceWithRawResponse + def agents(self) -> agents.AgentsResourceWithRawResponse: + from .resources.agents import AgentsResourceWithRawResponse + + return AgentsResourceWithRawResponse(self._client.agents) + + @cached_property + def providers(self) -> providers.ProvidersResourceWithRawResponse: + from .resources.providers import ProvidersResourceWithRawResponse + + return ProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithRawResponse: + from .resources.auth import AuthResourceWithRawResponse + + return AuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithRawResponse: + from .resources.regions import RegionsResourceWithRawResponse + + return RegionsResourceWithRawResponse(self._client.regions) + + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse + + return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse + + return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: + from .resources.api_keys import APIKeysResourceWithRawResponse - return GenaiResourceWithRawResponse(self._client.genai) + return APIKeysResourceWithRawResponse(self._client.api_keys) @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: @@ -471,10 +596,46 @@ def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: self._client = client @cached_property - def genai(self) -> genai.AsyncGenaiResourceWithRawResponse: - from .resources.genai import AsyncGenaiResourceWithRawResponse + def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: + from .resources.agents import AsyncAgentsResourceWithRawResponse - return AsyncGenaiResourceWithRawResponse(self._client.genai) + return AsyncAgentsResourceWithRawResponse(self._client.agents) + + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: + from .resources.providers import AsyncProvidersResourceWithRawResponse + + return AsyncProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithRawResponse: + from 
.resources.auth import AsyncAuthResourceWithRawResponse + + return AsyncAuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: + from .resources.regions import AsyncRegionsResourceWithRawResponse + + return AsyncRegionsResourceWithRawResponse(self._client.regions) + + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse + + return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse + + return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse + + return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: @@ -502,10 +663,46 @@ def __init__(self, client: DigitaloceanGenaiSDK) -> None: self._client = client @cached_property - def genai(self) -> genai.GenaiResourceWithStreamingResponse: - from .resources.genai import GenaiResourceWithStreamingResponse + def agents(self) -> agents.AgentsResourceWithStreamingResponse: + from .resources.agents import AgentsResourceWithStreamingResponse + + return AgentsResourceWithStreamingResponse(self._client.agents) + + @cached_property + def providers(self) -> providers.ProvidersResourceWithStreamingResponse: + from .resources.providers import ProvidersResourceWithStreamingResponse + + return ProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithStreamingResponse: + from .resources.auth import AuthResourceWithStreamingResponse + + return AuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithStreamingResponse: + from .resources.regions import RegionsResourceWithStreamingResponse - return GenaiResourceWithStreamingResponse(self._client.genai) + return RegionsResourceWithStreamingResponse(self._client.regions) + + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse + + return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse + + return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: + from .resources.api_keys import APIKeysResourceWithStreamingResponse + + return APIKeysResourceWithStreamingResponse(self._client.api_keys) @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: @@ -533,10 +730,46 @@ def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: self._client = client @cached_property - def genai(self) -> genai.AsyncGenaiResourceWithStreamingResponse: - from .resources.genai import AsyncGenaiResourceWithStreamingResponse + def agents(self) -> 
agents.AsyncAgentsResourceWithStreamingResponse: + from .resources.agents import AsyncAgentsResourceWithStreamingResponse + + return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: + from .resources.providers import AsyncProvidersResourceWithStreamingResponse + + return AsyncProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: + from .resources.auth import AsyncAuthResourceWithStreamingResponse + + return AsyncAuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: + from .resources.regions import AsyncRegionsResourceWithStreamingResponse + + return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse + + return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse + + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse - return AsyncGenaiResourceWithStreamingResponse(self._client.genai) + return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py index c4e2a67e..6dcbff02 100644 --- a/src/digitalocean_genai_sdk/resources/__init__.py +++ b/src/digitalocean_genai_sdk/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) from .chat import ( ChatResource, AsyncChatResource, @@ -8,13 +16,13 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) -from .genai import ( - GenaiResource, - AsyncGenaiResource, - GenaiResourceWithRawResponse, - AsyncGenaiResourceWithRawResponse, - GenaiResourceWithStreamingResponse, - AsyncGenaiResourceWithStreamingResponse, +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, ) from .models import ( ModelsResource, @@ -24,6 +32,30 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) +from .regions import ( + RegionsResource, + AsyncRegionsResource, + RegionsResourceWithRawResponse, + AsyncRegionsResourceWithRawResponse, + RegionsResourceWithStreamingResponse, + AsyncRegionsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) from .embeddings import ( EmbeddingsResource, AsyncEmbeddingsResource, @@ -32,14 +64,66 @@ EmbeddingsResourceWithStreamingResponse, AsyncEmbeddingsResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) __all__ = [ - "GenaiResource", - "AsyncGenaiResource", - "GenaiResourceWithRawResponse", - "AsyncGenaiResourceWithRawResponse", - "GenaiResourceWithStreamingResponse", - "AsyncGenaiResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + 
"IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/__init__.py b/src/digitalocean_genai_sdk/resources/agents/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/agents/__init__.py rename to src/digitalocean_genai_sdk/resources/agents/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/agents.py b/src/digitalocean_genai_sdk/resources/agents/agents.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/genai/agents/agents.py rename to src/digitalocean_genai_sdk/resources/agents/agents.py index 1b209988..2c0a11ed 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/agents.py +++ b/src/digitalocean_genai_sdk/resources/agents/agents.py @@ -6,6 +6,16 @@ import httpx +from ...types import ( + APIRetrievalMethod, + APIDeploymentVisibility, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -22,8 +32,7 @@ VersionsResourceWithStreamingResponse, AsyncVersionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property from .functions import ( FunctionsResource, AsyncFunctionsResource, @@ -32,9 +41,8 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, @@ -48,15 +56,7 @@ ChildAgentsResourceWithStreamingResponse, AsyncChildAgentsResourceWithStreamingResponse, ) -from ....types.genai import ( - APIRetrievalMethod, - APIDeploymentVisibility, - agent_list_params, - agent_create_params, - agent_update_params, - agent_update_status_params, -) -from ...._base_client import make_request_options +from ..._base_client import make_request_options from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -65,14 +65,14 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) -from ....types.genai.agent_list_response import AgentListResponse -from ....types.genai.api_retrieval_method import APIRetrievalMethod -from ....types.genai.agent_create_response import AgentCreateResponse -from ....types.genai.agent_delete_response import AgentDeleteResponse -from ....types.genai.agent_update_response import AgentUpdateResponse -from ....types.genai.agent_retrieve_response import AgentRetrieveResponse -from 
....types.genai.api_deployment_visibility import APIDeploymentVisibility -from ....types.genai.agent_update_status_response import AgentUpdateStatusResponse +from ...types.agent_list_response import AgentListResponse +from ...types.api_retrieval_method import APIRetrievalMethod +from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_delete_response import AgentDeleteResponse +from ...types.agent_update_response import AgentUpdateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse +from ...types.api_deployment_visibility import APIDeploymentVisibility +from ...types.agent_update_status_response import AgentUpdateStatusResponse __all__ = ["AgentsResource", "AsyncAgentsResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py b/src/digitalocean_genai_sdk/resources/agents/api_keys.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py rename to src/digitalocean_genai_sdk/resources/agents/api_keys.py index 940be6d7..bf4adb26 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/api_keys.py +++ b/src/digitalocean_genai_sdk/resources/agents/api_keys.py @@ -4,23 +4,23 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.agents import api_key_list_params, api_key_create_params, api_key_update_params -from ....types.genai.agents.api_key_list_response import APIKeyListResponse -from ....types.genai.agents.api_key_create_response import APIKeyCreateResponse -from ....types.genai.agents.api_key_delete_response import APIKeyDeleteResponse -from ....types.genai.agents.api_key_update_response import APIKeyUpdateResponse -from ....types.genai.agents.api_key_regenerate_response import APIKeyRegenerateResponse +from ..._base_client import make_request_options +from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.agents.api_key_list_response import APIKeyListResponse +from ...types.agents.api_key_create_response import APIKeyCreateResponse +from ...types.agents.api_key_delete_response import APIKeyDeleteResponse +from ...types.agents.api_key_update_response import APIKeyUpdateResponse +from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py b/src/digitalocean_genai_sdk/resources/agents/child_agents.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py rename to src/digitalocean_genai_sdk/resources/agents/child_agents.py index 75fb267e..ad2427f9 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/child_agents.py +++ b/src/digitalocean_genai_sdk/resources/agents/child_agents.py @@ -4,22 +4,22 @@ import httpx -from ...._types import NOT_GIVEN, Body, 
Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.agents import child_agent_add_params, child_agent_update_params -from ....types.genai.agents.child_agent_add_response import ChildAgentAddResponse -from ....types.genai.agents.child_agent_view_response import ChildAgentViewResponse -from ....types.genai.agents.child_agent_delete_response import ChildAgentDeleteResponse -from ....types.genai.agents.child_agent_update_response import ChildAgentUpdateResponse +from ..._base_client import make_request_options +from ...types.agents import child_agent_add_params, child_agent_update_params +from ...types.agents.child_agent_add_response import ChildAgentAddResponse +from ...types.agents.child_agent_view_response import ChildAgentViewResponse +from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse +from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse __all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/functions.py b/src/digitalocean_genai_sdk/resources/agents/functions.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/agents/functions.py rename to src/digitalocean_genai_sdk/resources/agents/functions.py index cf18aad5..a1025806 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/functions.py +++ b/src/digitalocean_genai_sdk/resources/agents/functions.py @@ -4,21 +4,21 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.agents import function_create_params, function_update_params -from ....types.genai.agents.function_create_response import FunctionCreateResponse -from ....types.genai.agents.function_delete_response import FunctionDeleteResponse -from ....types.genai.agents.function_update_response import FunctionUpdateResponse +from ..._base_client import make_request_options +from ...types.agents import function_create_params, function_update_params +from ...types.agents.function_create_response import FunctionCreateResponse +from ...types.agents.function_delete_response import FunctionDeleteResponse +from ...types.agents.function_update_response import FunctionUpdateResponse __all__ = ["FunctionsResource", "AsyncFunctionsResource"] diff --git 
a/src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py rename to src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py index ed9fed4f..ba190305 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/knowledge_bases.py +++ b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py @@ -4,18 +4,18 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput -from ....types.genai.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse +from ..._base_client import make_request_options +from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/agents/versions.py b/src/digitalocean_genai_sdk/resources/agents/versions.py similarity index 95% rename from src/digitalocean_genai_sdk/resources/genai/agents/versions.py rename to src/digitalocean_genai_sdk/resources/agents/versions.py index 5c0b6826..af4597d1 100644 --- a/src/digitalocean_genai_sdk/resources/genai/agents/versions.py +++ b/src/digitalocean_genai_sdk/resources/agents/versions.py @@ -4,20 +4,20 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.agents import version_list_params, version_update_params -from ....types.genai.agents.version_list_response import VersionListResponse -from ....types.genai.agents.version_update_response import VersionUpdateResponse +from ..._base_client import make_request_options +from ...types.agents import version_list_params, version_update_params +from ...types.agents.version_list_response import VersionListResponse +from ...types.agents.version_update_response import VersionUpdateResponse __all__ = ["VersionsResource", "AsyncVersionsResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/models/__init__.py b/src/digitalocean_genai_sdk/resources/api_keys/__init__.py similarity index 55% rename from src/digitalocean_genai_sdk/resources/genai/models/__init__.py rename to 
src/digitalocean_genai_sdk/resources/api_keys/__init__.py index d7dd650c..ed14565c 100644 --- a/src/digitalocean_genai_sdk/resources/genai/models/__init__.py +++ b/src/digitalocean_genai_sdk/resources/api_keys/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -24,10 +16,4 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", ] diff --git a/src/digitalocean_genai_sdk/resources/genai/models/models.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py similarity index 72% rename from src/digitalocean_genai_sdk/resources/genai/models/models.py rename to src/digitalocean_genai_sdk/resources/api_keys/api_keys.py index 6273086b..e55b3051 100644 --- a/src/digitalocean_genai_sdk/resources/genai/models/models.py +++ b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py @@ -7,54 +7,47 @@ import httpx -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from . import api_keys_ as api_keys +from ...types import api_key_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ....types.genai import model_list_params -from ...._base_client import make_request_options -from ....types.genai.model_list_response import ModelListResponse +from ..._base_client import make_request_options +from ...types.api_key_list_response import APIKeyListResponse -__all__ = ["ModelsResource", "AsyncModelsResource"] +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] -class ModelsResource(SyncAPIResource): +class APIKeysResource(SyncAPIResource): @cached_property - def api_keys(self) -> APIKeysResource: - return APIKeysResource(self._client) + def api_keys(self) -> api_keys.APIKeysResource: + return api_keys.APIKeysResource(self._client) @cached_property - def with_raw_response(self) -> ModelsResourceWithRawResponse: + def with_raw_response(self) -> APIKeysResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ - return ModelsResourceWithRawResponse(self) + return APIKeysResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ - return ModelsResourceWithStreamingResponse(self) + return APIKeysResourceWithStreamingResponse(self) def list( self, @@ -80,7 +73,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> APIKeyListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. @@ -124,36 +117,36 @@ def list( "public_only": public_only, "usecases": usecases, }, - model_list_params.ModelListParams, + api_key_list_params.APIKeyListParams, ), ), - cast_to=ModelListResponse, + cast_to=APIKeyListResponse, ) -class AsyncModelsResource(AsyncAPIResource): +class AsyncAPIKeysResource(AsyncAPIResource): @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - return AsyncAPIKeysResource(self._client) + def api_keys(self) -> api_keys.AsyncAPIKeysResource: + return api_keys.AsyncAPIKeysResource(self._client) @cached_property - def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers """ - return AsyncModelsResourceWithRawResponse(self) + return AsyncAPIKeysResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response """ - return AsyncModelsResourceWithStreamingResponse(self) + return AsyncAPIKeysResourceWithStreamingResponse(self) async def list( self, @@ -179,7 +172,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> APIKeyListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. 
@@ -223,60 +216,60 @@ async def list( "public_only": public_only, "usecases": usecases, }, - model_list_params.ModelListParams, + api_key_list_params.APIKeyListParams, ), ), - cast_to=ModelListResponse, + cast_to=APIKeyListResponse, ) -class ModelsResourceWithRawResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys self.list = to_raw_response_wrapper( - models.list, + api_keys.list, ) @cached_property - def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._models.api_keys) + def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: + return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys) -class AsyncModelsResourceWithRawResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys self.list = async_to_raw_response_wrapper( - models.list, + api_keys.list, ) @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._models.api_keys) + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: + return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys) -class ModelsResourceWithStreamingResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys self.list = to_streamed_response_wrapper( - models.list, + api_keys.list, ) @cached_property - def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._models.api_keys) + def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: + return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys) -class AsyncModelsResourceWithStreamingResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys self.list = async_to_streamed_response_wrapper( - models.list, + api_keys.list, ) @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._models.api_keys) + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: + return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys) diff --git a/src/digitalocean_genai_sdk/resources/genai/models/api_keys.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/models/api_keys.py rename to src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py index 58a16be2..3fc2f4f7 100644 --- a/src/digitalocean_genai_sdk/resources/genai/models/api_keys.py +++ b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py @@ -4,23 +4,23 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, 
Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.models import api_key_list_params, api_key_create_params, api_key_update_params -from ....types.genai.models.api_key_list_response import APIKeyListResponse -from ....types.genai.models.api_key_create_response import APIKeyCreateResponse -from ....types.genai.models.api_key_delete_response import APIKeyDeleteResponse -from ....types.genai.models.api_key_update_response import APIKeyUpdateResponse -from ....types.genai.models.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse +from ..._base_client import make_request_options +from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.api_keys.api_key_list_response import APIKeyListResponse +from ...types.api_keys.api_key_create_response import APIKeyCreateResponse +from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse +from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse +from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/__init__.py b/src/digitalocean_genai_sdk/resources/auth/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/auth/__init__.py rename to src/digitalocean_genai_sdk/resources/auth/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py b/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/auth/agents/__init__.py rename to src/digitalocean_genai_sdk/resources/auth/agents/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py b/src/digitalocean_genai_sdk/resources/auth/agents/agents.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py rename to src/digitalocean_genai_sdk/resources/auth/agents/agents.py index 7a7520fe..3a5ba673 100644 --- a/src/digitalocean_genai_sdk/resources/genai/auth/agents/agents.py +++ b/src/digitalocean_genai_sdk/resources/auth/agents/agents.py @@ -10,8 +10,8 @@ TokenResourceWithStreamingResponse, AsyncTokenResourceWithStreamingResponse, ) -from ....._compat import cached_property -from ....._resource import SyncAPIResource, AsyncAPIResource +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource __all__ = ["AgentsResource", "AsyncAgentsResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py b/src/digitalocean_genai_sdk/resources/auth/agents/token.py similarity index 93% rename from src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py rename to src/digitalocean_genai_sdk/resources/auth/agents/token.py index 491258af..89caaf10 100644 --- a/src/digitalocean_genai_sdk/resources/genai/auth/agents/token.py +++ b/src/digitalocean_genai_sdk/resources/auth/agents/token.py @@ -4,19 +4,19 @@ import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ....._utils import maybe_transform, 
async_maybe_transform -from ....._compat import cached_property -from ....._resource import SyncAPIResource, AsyncAPIResource -from ....._response import ( +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ....._base_client import make_request_options -from .....types.genai.auth.agents import token_create_params -from .....types.genai.auth.agents.token_create_response import TokenCreateResponse +from ...._base_client import make_request_options +from ....types.auth.agents import token_create_params +from ....types.auth.agents.token_create_response import TokenCreateResponse __all__ = ["TokenResource", "AsyncTokenResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/auth/auth.py b/src/digitalocean_genai_sdk/resources/auth/auth.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/genai/auth/auth.py rename to src/digitalocean_genai_sdk/resources/auth/auth.py index 5de06f71..854ac636 100644 --- a/src/digitalocean_genai_sdk/resources/genai/auth/auth.py +++ b/src/digitalocean_genai_sdk/resources/auth/auth.py @@ -2,8 +2,8 @@ from __future__ import annotations -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource from .agents.agents import ( AgentsResource, AsyncAgentsResource, diff --git a/src/digitalocean_genai_sdk/resources/genai/__init__.py b/src/digitalocean_genai_sdk/resources/genai/__init__.py deleted file mode 100644 index 010339f5..00000000 --- a/src/digitalocean_genai_sdk/resources/genai/__init__.py +++ /dev/null @@ -1,103 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) -from .genai import ( - GenaiResource, - AsyncGenaiResource, - GenaiResourceWithRawResponse, - AsyncGenaiResourceWithRawResponse, - GenaiResourceWithStreamingResponse, - AsyncGenaiResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", - "GenaiResource", - "AsyncGenaiResource", - "GenaiResourceWithRawResponse", - "AsyncGenaiResourceWithRawResponse", - "GenaiResourceWithStreamingResponse", - "AsyncGenaiResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/genai/genai.py b/src/digitalocean_genai_sdk/resources/genai/genai.py deleted file mode 100644 index 12a38e42..00000000 --- a/src/digitalocean_genai_sdk/resources/genai/genai.py +++ /dev/null @@ -1,383 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ...types import genai_retrieve_regions_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from .auth.auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .agents.agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) -from .models.models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from .providers.providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) -from .knowledge_bases.knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) -from ...types.genai_retrieve_regions_response import GenaiRetrieveRegionsResponse - -__all__ = ["GenaiResource", "AsyncGenaiResource"] - - -class GenaiResource(SyncAPIResource): - @cached_property - def agents(self) -> AgentsResource: - return AgentsResource(self._client) - - @cached_property - def providers(self) -> ProvidersResource: - return ProvidersResource(self._client) - - @cached_property - def auth(self) -> AuthResource: - return AuthResource(self._client) - - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - return IndexingJobsResource(self._client) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - return KnowledgeBasesResource(self._client) - - @cached_property - def models(self) -> ModelsResource: - return ModelsResource(self._client) - - @cached_property - def with_raw_response(self) -> GenaiResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers - """ - return GenaiResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> GenaiResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response - """ - return GenaiResourceWithStreamingResponse(self) - - def retrieve_regions( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> GenaiRetrieveRegionsResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - genai_retrieve_regions_params.GenaiRetrieveRegionsParams, - ), - ), - cast_to=GenaiRetrieveRegionsResponse, - ) - - -class AsyncGenaiResource(AsyncAPIResource): - @cached_property - def agents(self) -> AsyncAgentsResource: - return AsyncAgentsResource(self._client) - - @cached_property - def providers(self) -> AsyncProvidersResource: - return AsyncProvidersResource(self._client) - - @cached_property - def auth(self) -> AsyncAuthResource: - return AsyncAuthResource(self._client) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - return AsyncIndexingJobsResource(self._client) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - return AsyncKnowledgeBasesResource(self._client) - - @cached_property - def models(self) -> AsyncModelsResource: - return AsyncModelsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncGenaiResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers - """ - return AsyncGenaiResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncGenaiResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response - """ - return AsyncGenaiResourceWithStreamingResponse(self) - - async def retrieve_regions( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> GenaiRetrieveRegionsResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - genai_retrieve_regions_params.GenaiRetrieveRegionsParams, - ), - ), - cast_to=GenaiRetrieveRegionsResponse, - ) - - -class GenaiResourceWithRawResponse: - def __init__(self, genai: GenaiResource) -> None: - self._genai = genai - - self.retrieve_regions = to_raw_response_wrapper( - genai.retrieve_regions, - ) - - @cached_property - def agents(self) -> AgentsResourceWithRawResponse: - return AgentsResourceWithRawResponse(self._genai.agents) - - @cached_property - def providers(self) -> ProvidersResourceWithRawResponse: - return ProvidersResourceWithRawResponse(self._genai.providers) - - @cached_property - def auth(self) -> AuthResourceWithRawResponse: - return AuthResourceWithRawResponse(self._genai.auth) - - @cached_property - def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse: - return IndexingJobsResourceWithRawResponse(self._genai.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._genai.knowledge_bases) - - @cached_property - def models(self) -> ModelsResourceWithRawResponse: - return ModelsResourceWithRawResponse(self._genai.models) - - -class AsyncGenaiResourceWithRawResponse: - def __init__(self, genai: AsyncGenaiResource) -> None: - self._genai = genai - - self.retrieve_regions = async_to_raw_response_wrapper( - genai.retrieve_regions, - ) - - @cached_property - def agents(self) -> AsyncAgentsResourceWithRawResponse: - return AsyncAgentsResourceWithRawResponse(self._genai.agents) - - @cached_property - def providers(self) -> AsyncProvidersResourceWithRawResponse: - return AsyncProvidersResourceWithRawResponse(self._genai.providers) - - @cached_property - def auth(self) -> AsyncAuthResourceWithRawResponse: - return AsyncAuthResourceWithRawResponse(self._genai.auth) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse: - return AsyncIndexingJobsResourceWithRawResponse(self._genai.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._genai.knowledge_bases) - - @cached_property - def models(self) -> AsyncModelsResourceWithRawResponse: - return AsyncModelsResourceWithRawResponse(self._genai.models) - - -class GenaiResourceWithStreamingResponse: - def __init__(self, genai: GenaiResource) -> None: - self._genai = genai - - self.retrieve_regions = to_streamed_response_wrapper( - genai.retrieve_regions, - ) - - @cached_property - def 
agents(self) -> AgentsResourceWithStreamingResponse: - return AgentsResourceWithStreamingResponse(self._genai.agents) - - @cached_property - def providers(self) -> ProvidersResourceWithStreamingResponse: - return ProvidersResourceWithStreamingResponse(self._genai.providers) - - @cached_property - def auth(self) -> AuthResourceWithStreamingResponse: - return AuthResourceWithStreamingResponse(self._genai.auth) - - @cached_property - def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: - return IndexingJobsResourceWithStreamingResponse(self._genai.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._genai.knowledge_bases) - - @cached_property - def models(self) -> ModelsResourceWithStreamingResponse: - return ModelsResourceWithStreamingResponse(self._genai.models) - - -class AsyncGenaiResourceWithStreamingResponse: - def __init__(self, genai: AsyncGenaiResource) -> None: - self._genai = genai - - self.retrieve_regions = async_to_streamed_response_wrapper( - genai.retrieve_regions, - ) - - @cached_property - def agents(self) -> AsyncAgentsResourceWithStreamingResponse: - return AsyncAgentsResourceWithStreamingResponse(self._genai.agents) - - @cached_property - def providers(self) -> AsyncProvidersResourceWithStreamingResponse: - return AsyncProvidersResourceWithStreamingResponse(self._genai.providers) - - @cached_property - def auth(self) -> AsyncAuthResourceWithStreamingResponse: - return AsyncAuthResourceWithStreamingResponse(self._genai.auth) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: - return AsyncIndexingJobsResourceWithStreamingResponse(self._genai.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._genai.knowledge_bases) - - @cached_property - def models(self) -> AsyncModelsResourceWithStreamingResponse: - return AsyncModelsResourceWithStreamingResponse(self._genai.models) diff --git a/src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py b/src/digitalocean_genai_sdk/resources/indexing_jobs.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py rename to src/digitalocean_genai_sdk/resources/indexing_jobs.py index 7c9366ad..e0ea9839 100644 --- a/src/digitalocean_genai_sdk/resources/genai/indexing_jobs.py +++ b/src/digitalocean_genai_sdk/resources/indexing_jobs.py @@ -6,23 +6,23 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...types.genai import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from ..._base_client import make_request_options -from ...types.genai.indexing_job_list_response 
import IndexingJobListResponse -from ...types.genai.indexing_job_create_response import IndexingJobCreateResponse -from ...types.genai.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ...types.genai.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ...types.genai.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse +from .._base_client import make_request_options +from ..types.indexing_job_list_response import IndexingJobListResponse +from ..types.indexing_job_create_response import IndexingJobCreateResponse +from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/knowledge_bases/__init__.py rename to src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py similarity index 95% rename from src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py rename to src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py index 3a348b93..2576eaeb 100644 --- a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/data_sources.py +++ b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py @@ -4,26 +4,26 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client import make_request_options -from ....types.genai.knowledge_bases import ( +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( data_source_list_params, data_source_create_params, ) -from ....types.genai.knowledge_bases.data_source_list_response import DataSourceListResponse -from ....types.genai.knowledge_bases.data_source_create_response import DataSourceCreateResponse -from ....types.genai.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse -from ....types.genai.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam -from ....types.genai.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam +from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse +from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse +from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse +from ...types.knowledge_bases.api_spaces_data_source_param import 
APISpacesDataSourceParam +from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam __all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py similarity index 96% rename from src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py rename to src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py index 138d6ef8..87687615 100644 --- a/src/digitalocean_genai_sdk/resources/genai/knowledge_bases/knowledge_bases.py +++ b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py @@ -6,11 +6,12 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, @@ -24,13 +25,12 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) -from ....types.genai import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params -from ...._base_client import make_request_options -from ....types.genai.knowledge_base_list_response import KnowledgeBaseListResponse -from ....types.genai.knowledge_base_create_response import KnowledgeBaseCreateResponse -from ....types.genai.knowledge_base_delete_response import KnowledgeBaseDeleteResponse -from ....types.genai.knowledge_base_update_response import KnowledgeBaseUpdateResponse -from ....types.genai.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse +from ..._base_client import make_request_options +from ...types.knowledge_base_list_response import KnowledgeBaseListResponse +from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse +from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse +from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse +from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/__init__.py b/src/digitalocean_genai_sdk/resources/providers/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/providers/__init__.py rename to src/digitalocean_genai_sdk/resources/providers/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py rename to src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py similarity index 
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/__init__.py b/src/digitalocean_genai_sdk/resources/providers/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/resources/genai/providers/__init__.py
rename to src/digitalocean_genai_sdk/resources/providers/__init__.py
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/resources/genai/providers/anthropic/__init__.py
rename to src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py
similarity index 97%
rename from src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py
rename to src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py
index c996e2cc..39565bbf 100644
--- a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/anthropic.py
+++ b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py
@@ -10,8 +10,8 @@
     KeysResourceWithStreamingResponse,
     AsyncKeysResourceWithStreamingResponse,
 )
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
 
 __all__ = ["AnthropicResource", "AsyncAnthropicResource"]
 
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py
similarity index 96%
rename from src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py
rename to src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py
index e9ee7130..470e43ce 100644
--- a/src/digitalocean_genai_sdk/resources/genai/providers/anthropic/keys.py
+++ b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py
@@ -4,29 +4,24 @@
 
 import httpx
 
-from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import maybe_transform, async_maybe_transform
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
-from ....._response import (
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
     to_raw_response_wrapper,
     to_streamed_response_wrapper,
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
-from ....._base_client import make_request_options
-from .....types.genai.providers.anthropic import (
-    key_list_params,
-    key_create_params,
-    key_update_params,
-    key_list_agents_params,
-)
-from .....types.genai.providers.anthropic.key_list_response import KeyListResponse
-from .....types.genai.providers.anthropic.key_create_response import KeyCreateResponse
-from .....types.genai.providers.anthropic.key_delete_response import KeyDeleteResponse
-from .....types.genai.providers.anthropic.key_update_response import KeyUpdateResponse
-from .....types.genai.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
-from .....types.genai.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
+from ...._base_client import make_request_options
+from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
+from ....types.providers.anthropic.key_list_response import KeyListResponse
+from ....types.providers.anthropic.key_create_response import KeyCreateResponse
+from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
+from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
+from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
+from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
 
 __all__ = ["KeysResource", "AsyncKeysResource"]
 
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py b/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/resources/genai/providers/openai/__init__.py
rename to src/digitalocean_genai_sdk/resources/providers/openai/__init__.py
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py
similarity index 96%
rename from src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py
rename to src/digitalocean_genai_sdk/resources/providers/openai/keys.py
index ae9a3a01..4991d56c 100644
--- a/src/digitalocean_genai_sdk/resources/genai/providers/openai/keys.py
+++ b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py
@@ -4,29 +4,24 @@
 
 import httpx
 
-from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import maybe_transform, async_maybe_transform
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
-from ....._response import (
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
     to_raw_response_wrapper,
     to_streamed_response_wrapper,
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
-from ....._base_client import make_request_options
-from .....types.genai.providers.openai import (
-    key_list_params,
-    key_create_params,
-    key_update_params,
-    key_retrieve_agents_params,
-)
-from .....types.genai.providers.openai.key_list_response import KeyListResponse
-from .....types.genai.providers.openai.key_create_response import KeyCreateResponse
-from .....types.genai.providers.openai.key_delete_response import KeyDeleteResponse
-from .....types.genai.providers.openai.key_update_response import KeyUpdateResponse
-from .....types.genai.providers.openai.key_retrieve_response import KeyRetrieveResponse
-from .....types.genai.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
+from ...._base_client import make_request_options
+from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params
+from ....types.providers.openai.key_list_response import KeyListResponse
+from ....types.providers.openai.key_create_response import KeyCreateResponse
+from ....types.providers.openai.key_delete_response import KeyDeleteResponse
+from ....types.providers.openai.key_update_response import KeyUpdateResponse
+from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse
+from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
 
 __all__ = ["KeysResource", "AsyncKeysResource"]
 
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py
similarity index 97%
rename from src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py
rename to src/digitalocean_genai_sdk/resources/providers/openai/openai.py
index a0c5a373..89083056 100644
--- a/src/digitalocean_genai_sdk/resources/genai/providers/openai/openai.py
+++ b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py
@@ -10,8 +10,8 @@
     KeysResourceWithStreamingResponse,
     AsyncKeysResourceWithStreamingResponse,
 )
-from ....._compat import cached_property
-from ....._resource import SyncAPIResource, AsyncAPIResource
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
 
 __all__ = ["OpenAIResource", "AsyncOpenAIResource"]
 
diff --git a/src/digitalocean_genai_sdk/resources/genai/providers/providers.py b/src/digitalocean_genai_sdk/resources/providers/providers.py
similarity index 97%
rename from src/digitalocean_genai_sdk/resources/genai/providers/providers.py
rename to src/digitalocean_genai_sdk/resources/providers/providers.py
index fa3262d7..d0963ce6 100644
--- a/src/digitalocean_genai_sdk/resources/genai/providers/providers.py
+++ b/src/digitalocean_genai_sdk/resources/providers/providers.py
@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
 from .openai.openai import (
     OpenAIResource,
     AsyncOpenAIResource,
diff --git a/src/digitalocean_genai_sdk/resources/regions.py b/src/digitalocean_genai_sdk/resources/regions.py
new file mode 100644
index 00000000..5e06213b
--- /dev/null
+++ b/src/digitalocean_genai_sdk/resources/regions.py
@@ -0,0 +1,191 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import region_list_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.region_list_response import RegionListResponse
+
+__all__ = ["RegionsResource", "AsyncRegionsResource"]
+
+
+class RegionsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> RegionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return RegionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return RegionsResourceWithStreamingResponse(self)
+
+    def list(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RegionListResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: Include datacenters that are capable of running batch jobs.
+
+          serves_inference: Include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/genai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    region_list_params.RegionListParams,
+                ),
+            ),
+            cast_to=RegionListResponse,
+        )
+
+
+class AsyncRegionsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRegionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response
+        """
+        return AsyncRegionsResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        serves_batch: bool | NotGiven = NOT_GIVEN,
+        serves_inference: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> RegionListResponse:
+        """
+        To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+        Args:
+          serves_batch: Include datacenters that are capable of running batch jobs.
+
+          serves_inference: Include datacenters that serve inference.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/genai/regions",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "serves_batch": serves_batch,
+                        "serves_inference": serves_inference,
+                    },
+                    region_list_params.RegionListParams,
+                ),
+            ),
+            cast_to=RegionListResponse,
+        )
+
+
+class RegionsResourceWithRawResponse:
+    def __init__(self, regions: RegionsResource) -> None:
+        self._regions = regions
+
+        self.list = to_raw_response_wrapper(
+            regions.list,
+        )
+
+
+class AsyncRegionsResourceWithRawResponse:
+    def __init__(self, regions: AsyncRegionsResource) -> None:
+        self._regions = regions
+
+        self.list = async_to_raw_response_wrapper(
+            regions.list,
+        )
+
+
+class RegionsResourceWithStreamingResponse:
+    def __init__(self, regions: RegionsResource) -> None:
+        self._regions = regions
+
+        self.list = to_streamed_response_wrapper(
+            regions.list,
+        )
+
+
+class AsyncRegionsResourceWithStreamingResponse:
+    def __init__(self, regions: AsyncRegionsResource) -> None:
+        self._regions = regions
+
+        self.list = async_to_streamed_response_wrapper(
+            regions.list,
+        )
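For orientation, a minimal usage sketch of the new resource. This is not part of the generated patch: the client class name and the `api_key` argument are assumptions, while the `.list()` and `.with_raw_response` shapes follow the wrappers defined above.

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # assumed client class name

    client = DigitaloceanGenaiSDK(api_key="...")  # hypothetical auth parameter

    # Parsed call: query params are optional and default to NOT_GIVEN.
    regions = client.regions.list(serves_inference=True)

    # Raw-response variant, via the RegionsResourceWithRawResponse wrapper:
    response = client.regions.with_raw_response.list()
    print(response.headers.get("content-type"))
    regions = response.parse()  # yields the same RegionListResponse as the parsed call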
diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py
index 342f0444..1f81739b 100644
--- a/src/digitalocean_genai_sdk/types/__init__.py
+++ b/src/digitalocean_genai_sdk/types/__init__.py
@@ -3,14 +3,52 @@
 from __future__ import annotations
 
 from .model import Model as Model
+from .api_agent import APIAgent as APIAgent
+from .api_model import APIModel as APIModel
+from .api_agreement import APIAgreement as APIAgreement
+from .api_indexing_job import APIIndexingJob as APIIndexingJob
+from .agent_list_params import AgentListParams as AgentListParams
+from .api_model_version import APIModelVersion as APIModelVersion
+from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
+from .region_list_params import RegionListParams as RegionListParams
+from .agent_create_params import AgentCreateParams as AgentCreateParams
+from .agent_list_response import AgentListResponse as AgentListResponse
+from .agent_update_params import AgentUpdateParams as AgentUpdateParams
+from .api_key_list_params import APIKeyListParams as APIKeyListParams
 from .model_list_response import ModelListResponse as ModelListResponse
+from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
+from .region_list_response import RegionListResponse as RegionListResponse
+from .agent_create_response import AgentCreateResponse as AgentCreateResponse
+from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
+from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
+from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
+from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
 from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
+from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
+from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
 from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
+from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams
+from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
+from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
+from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
+from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse
+from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
+from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
+from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
+from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
 from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams
-from .genai_retrieve_regions_params import GenaiRetrieveRegionsParams as GenaiRetrieveRegionsParams
+from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
+from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
+from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse
+from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse
 from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse
-from .genai_retrieve_regions_response import GenaiRetrieveRegionsResponse as GenaiRetrieveRegionsResponse
+from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse
+from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .indexing_job_retrieve_data_sources_response import (
+    IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
+)
 from .chat_completion_request_message_content_part_text_param import (
     ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam,
 )
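The user-visible effect of this hunk: types that were previously imported from the nested `digitalocean_genai_sdk.types.genai` namespace are now re-exported flat from `digitalocean_genai_sdk.types` (the `X as X` aliasing marks each name as an explicit re-export for strict type checkers). For example:

    # Old nested import, removed by this patch:
    # from digitalocean_genai_sdk.types.genai import KnowledgeBaseListResponse

    # New flat import, per the re-exports added above:
    from digitalocean_genai_sdk.types import KnowledgeBaseListResponse, RegionListParams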
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_params.py b/src/digitalocean_genai_sdk/types/agent_create_params.py
similarity index 96%
rename from src/digitalocean_genai_sdk/types/genai/agent_create_params.py
rename to src/digitalocean_genai_sdk/types/agent_create_params.py
index ad801f17..58b99df7 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_create_params.py
+++ b/src/digitalocean_genai_sdk/types/agent_create_params.py
@@ -5,7 +5,7 @@
 from typing import List
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 
 __all__ = ["AgentCreateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py b/src/digitalocean_genai_sdk/types/agent_create_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agent_create_response.py
rename to src/digitalocean_genai_sdk/types/agent_create_response.py
index 414dc562..48545fe9 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_create_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_create_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["AgentCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py b/src/digitalocean_genai_sdk/types/agent_delete_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agent_delete_response.py
rename to src/digitalocean_genai_sdk/types/agent_delete_response.py
index aeca20a9..eb1d440d 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_delete_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["AgentDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_list_params.py b/src/digitalocean_genai_sdk/types/agent_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/agent_list_params.py
rename to src/digitalocean_genai_sdk/types/agent_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_list_response.py b/src/digitalocean_genai_sdk/types/agent_list_response.py
similarity index 99%
rename from src/digitalocean_genai_sdk/types/genai/agent_list_response.py
rename to src/digitalocean_genai_sdk/types/agent_list_response.py
index 4a7f5f89..4cedbb39 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_list_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_list_response.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_model import APIModel
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/agent_retrieve_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py
rename to src/digitalocean_genai_sdk/types/agent_retrieve_response.py
index 7a544dbe..2eed88af 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_retrieve_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_retrieve_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["AgentRetrieveResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_params.py b/src/digitalocean_genai_sdk/types/agent_update_params.py
similarity index 98%
rename from src/digitalocean_genai_sdk/types/genai/agent_update_params.py
rename to src/digitalocean_genai_sdk/types/agent_update_params.py
index 7d8b2616..85f9a9c2 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_update_params.py
+++ b/src/digitalocean_genai_sdk/types/agent_update_params.py
@@ -5,7 +5,7 @@
 from typing import List
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 from .api_retrieval_method import APIRetrievalMethod
 
 __all__ = ["AgentUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py b/src/digitalocean_genai_sdk/types/agent_update_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agent_update_response.py
rename to src/digitalocean_genai_sdk/types/agent_update_response.py
index d626a69a..2948aa1c 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_update_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_update_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["AgentUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py b/src/digitalocean_genai_sdk/types/agent_update_status_params.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py
rename to src/digitalocean_genai_sdk/types/agent_update_status_params.py
index 675b0043..a0cdc0b9 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_update_status_params.py
+++ b/src/digitalocean_genai_sdk/types/agent_update_status_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 from .api_deployment_visibility import APIDeploymentVisibility
 
 __all__ = ["AgentUpdateStatusParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/agent_update_status_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py
rename to src/digitalocean_genai_sdk/types/agent_update_status_response.py
index 39c3dd0d..b200f99d 100644
--- a/src/digitalocean_genai_sdk/types/genai/agent_update_status_response.py
+++ b/src/digitalocean_genai_sdk/types/agent_update_status_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["AgentUpdateStatusResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/__init__.py b/src/digitalocean_genai_sdk/types/agents/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/agents/__init__.py
rename to src/digitalocean_genai_sdk/types/agents/__init__.py
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py b/src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py
rename to src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py
index 92828873..46e9388b 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_agent_api_key_info.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py
@@ -3,7 +3,7 @@
 from typing import Optional
 from datetime import datetime
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APIAgentAPIKeyInfo"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_create_params.py
index 764a7cd0..c3fc44cd 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["APIKeyCreateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_create_response.py
index cba52af4..608927cd 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_create_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
 
 __all__ = ["APIKeyCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py
index b0909300..e114f0d4 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
 
 __all__ = ["APIKeyDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_list_params.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_list_response.py
index 6bba5e35..14c0d15a 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_list_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional
 
 from .api_meta import APIMeta
+from ..._models import BaseModel
 from .api_links import APILinks
-from ...._models import BaseModel
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
 
 __all__ = ["APIKeyListResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py
index a778c2c3..80b8869a 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_regenerate_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
 
 __all__ = ["APIKeyRegenerateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_update_params.py
index c426beeb..b49ebb38 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Required, Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["APIKeyUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py
rename to src/digitalocean_genai_sdk/types/agents/api_key_update_response.py
index a2974a36..a79c4a36 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_key_update_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
 
 __all__ = ["APIKeyUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py
rename to src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py
index b4ce4d94..a38f021b 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_link_knowledge_base_output.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APILinkKnowledgeBaseOutput"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_links.py b/src/digitalocean_genai_sdk/types/agents/api_links.py
similarity index 91%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_links.py
rename to src/digitalocean_genai_sdk/types/agents/api_links.py
index 4205a6e2..b37113f0 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_links.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_links.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APILinks", "Pages"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/api_meta.py b/src/digitalocean_genai_sdk/types/agents/api_meta.py
similarity index 88%
rename from src/digitalocean_genai_sdk/types/genai/agents/api_meta.py
rename to src/digitalocean_genai_sdk/types/agents/api_meta.py
index 513897f6..9191812c 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/api_meta.py
+++ b/src/digitalocean_genai_sdk/types/agents/api_meta.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APIMeta"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py
similarity index 94%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py
index 9eec78be..001baa6f 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Required, Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["ChildAgentAddParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py
index 38c04500..baccec10 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_add_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["ChildAgentAddResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py
index 88f0d765..b50fb024 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["ChildAgentDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py
similarity index 94%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py
index 96e84622..2f009a52 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Required, Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["ChildAgentUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py
index 94eca533..48a13c72 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_update_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["ChildAgentUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py
rename to src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py
index 4bbf7464..ffbaef12 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/child_agent_view_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py
@@ -4,7 +4,7 @@
 
 from typing import List, Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["ChildAgentViewResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py b/src/digitalocean_genai_sdk/types/agents/function_create_params.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py
rename to src/digitalocean_genai_sdk/types/agents/function_create_params.py
index 6ca55898..938fb1d5 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/function_create_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/function_create_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["FunctionCreateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/agents/function_create_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py
rename to src/digitalocean_genai_sdk/types/agents/function_create_response.py
index d1bf4a0f..82ab984b 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/function_create_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/function_create_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["FunctionCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/agents/function_delete_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py
rename to src/digitalocean_genai_sdk/types/agents/function_delete_response.py
index f39a4ba1..678ef62d 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/function_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/function_delete_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["FunctionDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py b/src/digitalocean_genai_sdk/types/agents/function_update_params.py
similarity index 94%
rename from src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py
rename to src/digitalocean_genai_sdk/types/agents/function_update_params.py
index 54ca15aa..2fa8e8f0 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/function_update_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/function_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Required, Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["FunctionUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/agents/function_update_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py
rename to src/digitalocean_genai_sdk/types/agents/function_update_response.py
index a63fc100..82fc63be 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/function_update_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/function_update_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["FunctionUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py
rename to src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py
index a3b7de4c..76bb4236 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/knowledge_base_detach_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py
@@ -4,7 +4,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["KnowledgeBaseDetachResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py b/src/digitalocean_genai_sdk/types/agents/version_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/agents/version_list_params.py
rename to src/digitalocean_genai_sdk/types/agents/version_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py b/src/digitalocean_genai_sdk/types/agents/version_list_response.py
similarity index 98%
rename from src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py
rename to src/digitalocean_genai_sdk/types/agents/version_list_response.py
index bbe749b1..1f3c3d69 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/version_list_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/version_list_response.py
@@ -6,8 +6,8 @@
 from pydantic import Field as FieldInfo
 
 from .api_meta import APIMeta
+from ..._models import BaseModel
 from .api_links import APILinks
-from ...._models import BaseModel
 from ..api_retrieval_method import APIRetrievalMethod
 
 __all__ = [
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py b/src/digitalocean_genai_sdk/types/agents/version_update_params.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py
rename to src/digitalocean_genai_sdk/types/agents/version_update_params.py
index 610d8670..d7fb01cb 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/version_update_params.py
+++ b/src/digitalocean_genai_sdk/types/agents/version_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 
 __all__ = ["VersionUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py b/src/digitalocean_genai_sdk/types/agents/version_update_response.py
similarity index 95%
rename from src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py
rename to src/digitalocean_genai_sdk/types/agents/version_update_response.py
index 4cfa4045..72058319 100644
--- a/src/digitalocean_genai_sdk/types/genai/agents/version_update_response.py
+++ b/src/digitalocean_genai_sdk/types/agents/version_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["VersionUpdateResponse", "AuditHeader"]
diff --git a/src/digitalocean_genai_sdk/types/genai/api_agent.py b/src/digitalocean_genai_sdk/types/api_agent.py
similarity index 99%
rename from src/digitalocean_genai_sdk/types/genai/api_agent.py
rename to src/digitalocean_genai_sdk/types/api_agent.py
index 243b8d53..cba97a42 100644
--- a/src/digitalocean_genai_sdk/types/genai/api_agent.py
+++ b/src/digitalocean_genai_sdk/types/api_agent.py
@@ -6,7 +6,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_model import APIModel
 from .api_knowledge_base import APIKnowledgeBase
 from .api_retrieval_method import APIRetrievalMethod
diff --git a/src/digitalocean_genai_sdk/types/genai/api_agreement.py b/src/digitalocean_genai_sdk/types/api_agreement.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/api_agreement.py
rename to src/digitalocean_genai_sdk/types/api_agreement.py
index 512ec34a..c4359f1f 100644
--- a/src/digitalocean_genai_sdk/types/genai/api_agreement.py
+++ b/src/digitalocean_genai_sdk/types/api_agreement.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["APIAgreement"]
diff --git a/src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py b/src/digitalocean_genai_sdk/types/api_deployment_visibility.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/api_deployment_visibility.py
rename to src/digitalocean_genai_sdk/types/api_deployment_visibility.py
diff --git a/src/digitalocean_genai_sdk/types/genai/api_indexing_job.py b/src/digitalocean_genai_sdk/types/api_indexing_job.py
similarity index 96%
rename from src/digitalocean_genai_sdk/types/genai/api_indexing_job.py
rename to src/digitalocean_genai_sdk/types/api_indexing_job.py
index 2809141c..f24aac94 100644
--- a/src/digitalocean_genai_sdk/types/genai/api_indexing_job.py
+++ b/src/digitalocean_genai_sdk/types/api_indexing_job.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["APIIndexingJob"]
diff --git a/src/digitalocean_genai_sdk/types/genai/model_list_params.py b/src/digitalocean_genai_sdk/types/api_key_list_params.py
similarity index 94%
rename from src/digitalocean_genai_sdk/types/genai/model_list_params.py
rename to src/digitalocean_genai_sdk/types/api_key_list_params.py
index 4abc1dc1..a1ab60dc 100644
--- a/src/digitalocean_genai_sdk/types/genai/model_list_params.py
+++ b/src/digitalocean_genai_sdk/types/api_key_list_params.py
@@ -5,10 +5,10 @@
 from typing import List
 from typing_extensions import Literal, TypedDict
 
-__all__ = ["ModelListParams"]
+__all__ = ["APIKeyListParams"]
 
 
-class ModelListParams(TypedDict, total=False):
+class APIKeyListParams(TypedDict, total=False):
     page: int
     """page number."""
diff --git a/src/digitalocean_genai_sdk/types/genai/model_list_response.py b/src/digitalocean_genai_sdk/types/api_key_list_response.py
similarity index 88%
rename from src/digitalocean_genai_sdk/types/genai/model_list_response.py
rename to src/digitalocean_genai_sdk/types/api_key_list_response.py
index ef9ea0f3..360de7a4 100644
--- a/src/digitalocean_genai_sdk/types/genai/model_list_response.py
+++ b/src/digitalocean_genai_sdk/types/api_key_list_response.py
@@ -3,13 +3,13 @@
 from typing import List, Optional
 from datetime import datetime
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_agreement import APIAgreement
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
 from .api_model_version import APIModelVersion
 
-__all__ = ["ModelListResponse", "Model"]
+__all__ = ["APIKeyListResponse", "Model"]
 
 
 class Model(BaseModel):
@@ -34,7 +34,7 @@ class Model(BaseModel):
     version: Optional[APIModelVersion] = None
 
 
-class ModelListResponse(BaseModel):
+class APIKeyListResponse(BaseModel):
     links: Optional[APILinks] = None
 
     meta: Optional[APIMeta] = None
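After the rename, the TypedDict is used exactly as before under its new name; a small sketch (only the `page` field is visible in this hunk, so nothing else is assumed):

    from digitalocean_genai_sdk.types import APIKeyListParams

    params: APIKeyListParams = {"page": 1}  # page number, per the field docstring above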
--git a/src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_create_params.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py similarity index 89% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py index 3840dd85..654e9f1e 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_create_response.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ...._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py similarity index 89% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py index 5788a51e..4d81d047 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_delete_response.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ...._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_list_params.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py similarity index 93% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py index 966d7e49..535e2f96 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_list_response.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py @@ -2,7 +2,7 @@ from typing import List, Optional -from ...._models import BaseModel +from ..._models import BaseModel from ..agents.api_meta import APIMeta from ..agents.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py similarity index 90% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py index 3712576c..23c1c0b9 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_params.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from ...._utils import PropertyInfo +from ..._utils import PropertyInfo __all__ = 
["APIKeyUpdateParams"] diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py similarity index 90% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py index dd6e050b..44a316dc 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_regenerate_response.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ...._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateRegenerateResponse"] diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py similarity index 89% rename from src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py rename to src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py index 62ca0dfe..3671addf 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_key_update_response.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ...._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git a/src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py b/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py similarity index 92% rename from src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py rename to src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py index fd7545b3..bf354a47 100644 --- a/src/digitalocean_genai_sdk/types/genai/models/api_model_api_key_info.py +++ b/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from ...._models import BaseModel +from ..._models import BaseModel __all__ = ["APIModelAPIKeyInfo"] diff --git a/src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py b/src/digitalocean_genai_sdk/types/api_knowledge_base.py similarity index 95% rename from src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py rename to src/digitalocean_genai_sdk/types/api_knowledge_base.py index 1ea8c901..5b4b6e2c 100644 --- a/src/digitalocean_genai_sdk/types/genai/api_knowledge_base.py +++ b/src/digitalocean_genai_sdk/types/api_knowledge_base.py @@ -3,7 +3,7 @@ from typing import List, Optional from datetime import datetime -from ..._models import BaseModel +from .._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["APIKnowledgeBase"] diff --git a/src/digitalocean_genai_sdk/types/genai/api_model.py b/src/digitalocean_genai_sdk/types/api_model.py similarity index 97% rename from src/digitalocean_genai_sdk/types/genai/api_model.py rename to src/digitalocean_genai_sdk/types/api_model.py index 38efa6dc..d680a638 100644 --- a/src/digitalocean_genai_sdk/types/genai/api_model.py +++ b/src/digitalocean_genai_sdk/types/api_model.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel from .api_agreement import APIAgreement from .api_model_version import 
APIModelVersion diff --git a/src/digitalocean_genai_sdk/types/genai/api_model_version.py b/src/digitalocean_genai_sdk/types/api_model_version.py similarity index 89% rename from src/digitalocean_genai_sdk/types/genai/api_model_version.py rename to src/digitalocean_genai_sdk/types/api_model_version.py index c96a4faf..2e118632 100644 --- a/src/digitalocean_genai_sdk/types/genai/api_model_version.py +++ b/src/digitalocean_genai_sdk/types/api_model_version.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["APIModelVersion"] diff --git a/src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py b/src/digitalocean_genai_sdk/types/api_retrieval_method.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/api_retrieval_method.py rename to src/digitalocean_genai_sdk/types/api_retrieval_method.py diff --git a/src/digitalocean_genai_sdk/types/genai/auth/__init__.py b/src/digitalocean_genai_sdk/types/auth/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/auth/__init__.py rename to src/digitalocean_genai_sdk/types/auth/__init__.py diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py b/src/digitalocean_genai_sdk/types/auth/agents/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/auth/agents/__init__.py rename to src/digitalocean_genai_sdk/types/auth/agents/__init__.py diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py similarity index 89% rename from src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py rename to src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py index aa95c9c2..0df640f9 100644 --- a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_params.py +++ b/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from ....._utils import PropertyInfo +from ...._utils import PropertyInfo __all__ = ["TokenCreateParams"] diff --git a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py similarity index 88% rename from src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py rename to src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py index 018d4755..e58b7399 100644 --- a/src/digitalocean_genai_sdk/types/genai/auth/agents/token_create_response.py +++ b/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ....._models import BaseModel +from ...._models import BaseModel __all__ = ["TokenCreateResponse"] diff --git a/src/digitalocean_genai_sdk/types/genai/__init__.py b/src/digitalocean_genai_sdk/types/genai/__init__.py deleted file mode 100644 index 486ef2b0..00000000 --- a/src/digitalocean_genai_sdk/types/genai/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .api_agent import APIAgent as APIAgent -from .api_model import APIModel as APIModel -from .api_agreement import APIAgreement as APIAgreement -from .api_indexing_job import APIIndexingJob as APIIndexingJob -from .agent_list_params import AgentListParams as AgentListParams -from .api_model_version import APIModelVersion as APIModelVersion -from .model_list_params import ModelListParams as ModelListParams -from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase -from .agent_create_params import AgentCreateParams as AgentCreateParams -from .agent_list_response import AgentListResponse as AgentListResponse -from .agent_update_params import AgentUpdateParams as AgentUpdateParams -from .model_list_response import ModelListResponse as ModelListResponse -from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod -from .agent_create_response import AgentCreateResponse as AgentCreateResponse -from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse -from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams -from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse -from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse -from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams -from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse -from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse -from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse -from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse -from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse -from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py b/src/digitalocean_genai_sdk/types/indexing_job_create_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/genai/indexing_job_create_params.py rename to src/digitalocean_genai_sdk/types/indexing_job_create_params.py diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py 
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py b/src/digitalocean_genai_sdk/types/indexing_job_create_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py
rename to src/digitalocean_genai_sdk/types/indexing_job_create_response.py
index 835ec60d..839bc83b 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_create_response.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py b/src/digitalocean_genai_sdk/types/indexing_job_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_list_params.py
rename to src/digitalocean_genai_sdk/types/indexing_job_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py b/src/digitalocean_genai_sdk/types/indexing_job_list_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py
rename to src/digitalocean_genai_sdk/types/indexing_job_list_response.py
index b6d62f88..1379cc55 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_list_response.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_list_response.py
@@ -2,7 +2,7 @@
 
 from typing import List, Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
 from .api_indexing_job import APIIndexingJob
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py
similarity index 97%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py
rename to src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py
index a9d0c2c0..b178b984 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_data_sources_response.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"]
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py
rename to src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py
index 6034bdf1..95f33d7a 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_retrieve_response.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobRetrieveResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py
similarity index 91%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py
rename to src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py
index 9359a42a..4c2848b0 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_params.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 
 __all__ = ["IndexingJobUpdateCancelParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py
rename to src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py
index ae4b394f..d50e1865 100644
--- a/src/digitalocean_genai_sdk/types/genai/indexing_job_update_cancel_response.py
+++ b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobUpdateCancelResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_create_params.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_create_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_create_response.py
index ec69972a..cc2d8b9f 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_knowledge_base import APIKnowledgeBase
 
 __all__ = ["KnowledgeBaseCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py
similarity index 87%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py
index 486423d3..6401e25a 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["KnowledgeBaseDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_list_params.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_list_response.py
index 0db1bc1c..09ca1ad3 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_list_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py
@@ -2,7 +2,7 @@
 
 from typing import List, Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
 from .api_knowledge_base import APIKnowledgeBase
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py
similarity index 95%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py
index e139049d..5a3b5f2c 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_retrieve_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py
@@ -3,7 +3,7 @@
 from typing import Optional
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_knowledge_base import APIKnowledgeBase
 
 __all__ = ["KnowledgeBaseRetrieveResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py
similarity index 94%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_update_params.py
index 18bf442d..297c79de 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_params.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py
@@ -5,7 +5,7 @@
 from typing import List
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 
 __all__ = ["KnowledgeBaseUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_base_update_response.py
index 2c8bfcce..f3ba2c32 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_base_update_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_knowledge_base import APIKnowledgeBase
 
 __all__ = ["KnowledgeBaseUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/__init__.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py
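Every hunk in the run above makes the same one-character change: the files move up from `types/genai/` to `types/`, so the private `_models` and `_utils` modules at the package root sit one relative level closer, and `...` becomes `..`. A comment-only sketch of how the dots resolve (paths taken from the renames above; the layout annotation is illustrative):

    # src/digitalocean_genai_sdk/_models.py          <- package root
    #
    # Before: src/digitalocean_genai_sdk/types/genai/knowledge_base_create_response.py
    #   from ..._models import BaseModel   # genai -> types -> digitalocean_genai_sdk
    # After:  src/digitalocean_genai_sdk/types/knowledge_base_create_response.py
    #   from .._models import BaseModel    # types -> digitalocean_genai_sdk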
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py
index f3cf81ec..1dcc9639 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APIFileUploadDataSource"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_file_upload_data_source_param.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py
similarity index 96%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py
index b281888d..df1cd3bb 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -3,7 +3,7 @@
 from typing import Optional
 from datetime import datetime
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from ..api_indexing_job import APIIndexingJob
 from .api_spaces_data_source import APISpacesDataSource
 from .api_file_upload_data_source import APIFileUploadDataSource
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py
index e35b5cbe..f3a0421a 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APISpacesDataSource"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_spaces_data_source_param.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py
similarity index 96%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py
index d2db09ff..4690c607 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py
@@ -3,7 +3,7 @@
 from typing import Optional
 from typing_extensions import Literal
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["APIWebCrawlerDataSource"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/api_web_crawler_data_source_param.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py
similarity index 95%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py
index 5363eb15..b1abafdf 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_params.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ...._utils import PropertyInfo
+from ..._utils import PropertyInfo
 from .api_spaces_data_source_param import APISpacesDataSourceParam
 from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py
similarity index 91%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py
index 694c7a29..1035d3f4 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_create_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
 
 __all__ = ["DataSourceCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py
index e04383bc..53954d7f 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 
 __all__ = ["DataSourceDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_params.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py
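The `*_params` files in this stretch share one shape: request payloads are plain `TypedDict`s, and `Annotated[..., PropertyInfo(alias=...)]` carries the wire name when it differs from the Python name. A self-contained sketch; `PropertyInfo` is stubbed because the real one is SDK-internal, and the field and alias below are invented for illustration, not read from these files:

    from typing import Optional
    from typing_extensions import Annotated, TypedDict

    class PropertyInfo:  # stand-in for digitalocean_genai_sdk._utils.PropertyInfo
        def __init__(self, alias: Optional[str] = None) -> None:
            self.alias = alias

    class DataSourceParamsSketch(TypedDict, total=False):
        body_uuid: Annotated[str, PropertyInfo(alias="uuid")]  # hypothetical field/alias

    params: DataSourceParamsSketch = {"body_uuid": "kb-123"}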
diff --git a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py
rename to src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py
index 872a7923..78246ce1 100644
--- a/src/digitalocean_genai_sdk/types/genai/knowledge_bases/data_source_list_response.py
+++ b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py
@@ -2,7 +2,7 @@
 
 from typing import List, Optional
 
-from ...._models import BaseModel
+from ..._models import BaseModel
 from ..agents.api_meta import APIMeta
 from ..agents.api_links import APILinks
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/__init__.py b/src/digitalocean_genai_sdk/types/providers/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/__init__.py
rename to src/digitalocean_genai_sdk/types/providers/__init__.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/__init__.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py b/src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py
index e18489b2..8459c11c 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/api_anthropic_api_key_info.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py
@@ -3,7 +3,7 @@
 from typing import Optional
 from datetime import datetime
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 
 __all__ = ["APIAnthropicAPIKeyInfo"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_params.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py
index 612baa41..5968aee0 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_create_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
 
 __all__ = ["KeyCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py
index 9ad9f478..d9467f40 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
 
 __all__ = ["KeyDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_params.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py
index 558aef6c..ba6ca946 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_agents_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py
@@ -4,7 +4,7 @@
 
 from typing import List, Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from ...agents.api_meta import APIMeta
 from ...agents.api_links import APILinks
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_params.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py
index 47f9cc3a..25c2895c 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_list_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py
@@ -2,7 +2,7 @@
 
 from typing import List, Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from ...agents.api_meta import APIMeta
 from ...agents.api_links import APILinks
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
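The `*_list_response.py` files being touched all import the same pagination pair, `APIMeta` and `APILinks`, from the now-shallower `agents` package, alongside the item model. A hedged sketch of that list envelope, with `pydantic.BaseModel` standing in for the SDK's internal `_models.BaseModel`; every field name below is an assumption (the hunks show only the imports):

    from typing import List, Optional
    from pydantic import BaseModel

    class APIMeta(BaseModel):
        page: Optional[int] = None   # assumed pagination fields
        pages: Optional[int] = None
        total: Optional[int] = None

    class APILinks(BaseModel):
        first: Optional[str] = None  # assumed
        last: Optional[str] = None

    class APIAnthropicAPIKeyInfo(BaseModel):
        uuid: Optional[str] = None   # assumed

    class KeyListResponse(BaseModel):
        api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None  # assumed field name
        links: Optional[APILinks] = None
        meta: Optional[APIMeta] = None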
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py
index 39141a92..3b185df8 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_retrieve_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
 
 __all__ = ["KeyRetrieveResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py
index a9011e00..c07d7f66 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_params.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ....._utils import PropertyInfo
+from ...._utils import PropertyInfo
 
 __all__ = ["KeyUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py
rename to src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py
index e8a5d19d..a73dabbd 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/anthropic/key_update_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
 
 __all__ = ["KeyUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py b/src/digitalocean_genai_sdk/types/providers/openai/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/__init__.py
rename to src/digitalocean_genai_sdk/types/providers/openai/__init__.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py b/src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py
rename to src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py
index 7a4267a0..9bf7082f 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/api_openai_api_key_info.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py
@@ -3,7 +3,7 @@
 from typing import List, Optional
 from datetime import datetime
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from ...api_model import APIModel
 
 __all__ = ["APIOpenAIAPIKeyInfo"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_params.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py
index 539cebf2..37dc65a9 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_create_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
 
 __all__ = ["KeyCreateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py
index fcf6bf04..7f07a584 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_delete_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
 
 __all__ = ["KeyDeleteResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_params.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py
similarity index 92%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py
index 2ce69038..b5f5884b 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_list_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py
@@ -2,7 +2,7 @@
 
 from typing import List, Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from ...agents.api_meta import APIMeta
 from ...agents.api_links import APILinks
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py
similarity index 100%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_params.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py
similarity index 93%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py
index ee25321e..f42edea6 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_agents_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py
@@ -4,7 +4,7 @@
 
 from typing import List, Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from ...agents.api_meta import APIMeta
 from ...agents.api_links import APILinks
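The surrounding OpenAI-provider responses mirror the Anthropic ones: create, retrieve, update, and delete each wrap a single `APIOpenAIAPIKeyInfo`, which additionally imports `APIModel` (an OpenAI key advertises the models it can serve). A sketch under the same caveats as above (pydantic stand-in for the SDK's `BaseModel`, field names assumed):

    from typing import List, Optional
    from pydantic import BaseModel

    class APIModel(BaseModel):
        name: Optional[str] = None  # assumed

    class APIOpenAIAPIKeyInfo(BaseModel):
        uuid: Optional[str] = None               # assumed
        models: Optional[List[APIModel]] = None  # implied by the api_model import

    class KeyRetrieveResponse(BaseModel):
        api_key_info: Optional[APIOpenAIAPIKeyInfo] = None  # assumed field name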
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py
index 156edaeb..03cd8573 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_retrieve_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
 
 __all__ = ["KeyRetrieveResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py
similarity index 90%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py
index a9011e00..c07d7f66 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_params.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ....._utils import PropertyInfo
+from ...._utils import PropertyInfo
 
 __all__ = ["KeyUpdateParams"]
diff --git a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py
similarity index 89%
rename from src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py
rename to src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py
index bfb5faae..f14b65ce 100644
--- a/src/digitalocean_genai_sdk/types/genai/providers/openai/key_update_response.py
+++ b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ....._models import BaseModel
+from ...._models import BaseModel
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo
 
 __all__ = ["KeyUpdateResponse"]
diff --git a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py b/src/digitalocean_genai_sdk/types/region_list_params.py
similarity index 77%
rename from src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py
rename to src/digitalocean_genai_sdk/types/region_list_params.py
index 678c4ead..1db0ad50 100644
--- a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_params.py
+++ b/src/digitalocean_genai_sdk/types/region_list_params.py
@@ -4,10 +4,10 @@
 
 from typing_extensions import TypedDict
 
-__all__ = ["GenaiRetrieveRegionsParams"]
+__all__ = ["RegionListParams"]
 
 
-class GenaiRetrieveRegionsParams(TypedDict, total=False):
+class RegionListParams(TypedDict, total=False):
     serves_batch: bool
     """include datacenters that are capable of running batch jobs."""
diff --git a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py b/src/digitalocean_genai_sdk/types/region_list_response.py
similarity index 81%
rename from src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py
rename to src/digitalocean_genai_sdk/types/region_list_response.py
index e4b21d51..0f955b36 100644
--- a/src/digitalocean_genai_sdk/types/genai_retrieve_regions_response.py
+++ b/src/digitalocean_genai_sdk/types/region_list_response.py
@@ -4,7 +4,7 @@
 
 from .._models import BaseModel
 
-__all__ = ["GenaiRetrieveRegionsResponse", "Region"]
+__all__ = ["RegionListResponse", "Region"]
 
 
 class Region(BaseModel):
@@ -19,5 +19,5 @@ class Region(BaseModel):
     stream_inference_url: Optional[str] = None
 
 
-class GenaiRetrieveRegionsResponse(BaseModel):
+class RegionListResponse(BaseModel):
     regions: Optional[List[Region]] = None
diff --git a/tests/api_resources/genai/__init__.py b/tests/api_resources/agents/__init__.py
similarity index 100%
rename from tests/api_resources/genai/__init__.py
rename to tests/api_resources/agents/__init__.py
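Unlike the purely mechanical moves elsewhere in this patch, the regions pair just above also renames the symbols: RPC-flavored `GenaiRetrieveRegions*` becomes resource-flavored `RegionList*`, consistent with a `client.regions.list(...)` call surface (that client-side method rename is inferred from the naming convention, not shown in these hunks). The renamed types reassemble into runnable form as follows, with pydantic in place of the SDK's `BaseModel`, and with the `Region` fields that the hunk elides left out:

    from typing import List, Optional
    from typing_extensions import TypedDict
    from pydantic import BaseModel

    class RegionListParams(TypedDict, total=False):
        serves_batch: bool
        """include datacenters that are capable of running batch jobs."""

    class Region(BaseModel):
        stream_inference_url: Optional[str] = None  # other fields elided in the hunk

    class RegionListResponse(BaseModel):
        regions: Optional[List[Region]] = None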
diff --git a/tests/api_resources/genai/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
similarity index 83%
rename from tests/api_resources/genai/agents/test_api_keys.py
rename to tests/api_resources/agents/test_api_keys.py
index 2b55534a..911ac6f9 100644
--- a/tests/api_resources/genai/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -9,7 +9,7 @@
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.genai.agents import (
+from digitalocean_genai_sdk.types.agents import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
@@ -26,7 +26,7 @@ class TestAPIKeys:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.create(
+        api_key = client.agents.api_keys.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
@@ -34,7 +34,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.create(
+        api_key = client.agents.api_keys.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
             name="name",
@@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.agents.api_keys.with_raw_response.create(
+        response = client.agents.api_keys.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
@@ -56,7 +56,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.agents.api_keys.with_streaming_response.create(
+        with client.agents.api_keys.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.create(
+            client.agents.api_keys.with_raw_response.create(
                 path_agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.update(
+        api_key = client.agents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -87,7 +87,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.update(
+        api_key = client.agents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
@@ -99,7 +99,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.agents.api_keys.with_raw_response.update(
+        response = client.agents.api_keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -112,7 +112,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.agents.api_keys.with_streaming_response.update(
+        with client.agents.api_keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         ) as response:
@@ -128,13 +128,13 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.update(
+            client.agents.api_keys.with_raw_response.update(
                 path_api_key_uuid="api_key_uuid",
                 path_agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.update(
+            client.agents.api_keys.with_raw_response.update(
                 path_api_key_uuid="",
                 path_agent_uuid="agent_uuid",
             )
@@ -142,7 +142,7 @@ def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.list(
+        api_key = client.agents.api_keys.list(
             agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])
@@ -150,7 +150,7 @@ def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.list(
+        api_key = client.agents.api_keys.list(
             agent_uuid="agent_uuid",
             page=0,
             per_page=0,
@@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.agents.api_keys.with_raw_response.list(
+        response = client.agents.api_keys.with_raw_response.list(
             agent_uuid="agent_uuid",
         )
@@ -172,7 +172,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.agents.api_keys.with_streaming_response.list(
+        with client.agents.api_keys.with_streaming_response.list(
             agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -187,14 +187,14 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.list(
+            client.agents.api_keys.with_raw_response.list(
                 agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.delete(
+        api_key = client.agents.api_keys.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -203,7 +203,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.agents.api_keys.with_raw_response.delete(
+        response = client.agents.api_keys.with_raw_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.agents.api_keys.with_streaming_response.delete(
+        with client.agents.api_keys.with_streaming_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -232,13 +232,13 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.delete(
+            client.agents.api_keys.with_raw_response.delete(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.delete(
+            client.agents.api_keys.with_raw_response.delete(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
@@ -246,7 +246,7 @@ def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.genai.agents.api_keys.regenerate(
+        api_key = client.agents.api_keys.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -255,7 +255,7 @@ def test_method_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.agents.api_keys.with_raw_response.regenerate(
+        response = client.agents.api_keys.with_raw_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -268,7 +268,7 @@ def test_raw_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.agents.api_keys.with_streaming_response.regenerate(
+        with client.agents.api_keys.with_streaming_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -284,13 +284,13 @@ def test_streaming_response_regenerate(self, client: DigitaloceanGenaiSDK) -> No
     @parametrize
     def test_path_params_regenerate(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.regenerate(
+            client.agents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.genai.agents.api_keys.with_raw_response.regenerate(
+            client.agents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
@@ -302,7 +302,7 @@ class TestAsyncAPIKeys:
     @pytest.mark.skip()
     @parametrize
     async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.agents.api_keys.create(
+        api_key = await async_client.agents.api_keys.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
@@ -310,7 +310,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
-        api_key = await async_client.genai.agents.api_keys.create(
+        api_key = await async_client.agents.api_keys.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
             name="name",
@@ -320,7 +320,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.agents.api_keys.with_raw_response.create(
+        response = await async_client.agents.api_keys.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
@@ -332,7 +332,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.agents.api_keys.with_streaming_response.create(
+        async with async_client.agents.api_keys.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -347,14 +347,14 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.create(
+            await async_client.agents.api_keys.with_raw_response.create(
                 path_agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.agents.api_keys.update(
+        api_key = await async_client.agents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
-        api_key = await async_client.genai.agents.api_keys.update(
+        api_key = await async_client.agents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
@@ -375,7 +375,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.agents.api_keys.with_raw_response.update(
+        response = await async_client.agents.api_keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.agents.api_keys.with_streaming_response.update(
+        async with async_client.agents.api_keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         ) as response:
@@ -404,13 +404,13 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.update(
+            await async_client.agents.api_keys.with_raw_response.update(
                 path_api_key_uuid="api_key_uuid",
                 path_agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.update(
+            await async_client.agents.api_keys.with_raw_response.update(
                 path_api_key_uuid="",
                 path_agent_uuid="agent_uuid",
             )
@@ -418,7 +418,7 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.agents.api_keys.list(
+        api_key = await async_client.agents.api_keys.list(
             agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])
@@ -426,7 +426,7 @@ async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non
-        api_key = await async_client.genai.agents.api_keys.list(
+        api_key = await async_client.agents.api_keys.list(
             agent_uuid="agent_uuid",
             page=0,
             per_page=0,
@@ -436,7 +436,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.agents.api_keys.with_raw_response.list(
+        response = await async_client.agents.api_keys.with_raw_response.list(
             agent_uuid="agent_uuid",
         )
@@ -448,7 +448,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.agents.api_keys.with_streaming_response.list(
+        async with async_client.agents.api_keys.with_streaming_response.list(
             agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -463,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
     @parametrize
     async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.list(
+            await async_client.agents.api_keys.with_raw_response.list(
                 agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.agents.api_keys.delete(
+        api_key = await async_client.agents.api_keys.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -479,7 +479,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.agents.api_keys.with_raw_response.delete(
+        response = await async_client.agents.api_keys.with_raw_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -492,7 +492,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.agents.api_keys.with_streaming_response.delete(
+        async with async_client.agents.api_keys.with_streaming_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -508,13 +508,13 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.delete(
+            await async_client.agents.api_keys.with_raw_response.delete(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.delete(
+            await async_client.agents.api_keys.with_raw_response.delete(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
@@ -522,7 +522,7 @@ async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_method_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.agents.api_keys.regenerate(
+        api_key = await async_client.agents.api_keys.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -531,7 +531,7 @@ async def test_method_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.agents.api_keys.with_raw_response.regenerate(
+        response = await async_client.agents.api_keys.with_raw_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -544,7 +544,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncDigitaloceanGena
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.agents.api_keys.with_streaming_response.regenerate(
+        async with async_client.agents.api_keys.with_streaming_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -560,13 +560,13 @@ async def test_streaming_response_regenerate(self, async_client: AsyncDigitaloce
     @parametrize
     async def test_path_params_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.regenerate(
+            await async_client.agents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
            )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.agents.api_keys.with_raw_response.regenerate(
+            await async_client.agents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
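Every `test_path_params_*` case rewritten in this file asserts the same client-side guard: an empty path parameter must raise before any HTTP request is built. A self-contained sketch of that contract (the helper below is illustrative; the SDK enforces this inside its generated resource methods):

    import pytest

    def require_path_param(name: str, value: str) -> str:
        # Mirrors the message asserted throughout the tests above.
        if not value:
            raise ValueError(f"Expected a non-empty value for `{name}` but received {value!r}")
        return value

    def test_empty_agent_uuid_is_rejected() -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid`"):
            require_path_param("agent_uuid", "")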
client.genai.agents.child_agents.delete( + child_agent = client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -102,7 +102,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.child_agents.with_raw_response.delete( + response = client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -115,7 +115,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.child_agents.with_streaming_response.delete( + with client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.genai.agents.child_agents.with_raw_response.delete( + client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.genai.agents.child_agents.with_raw_response.delete( + client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,7 +145,7 @@ def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.genai.agents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -154,7 +154,7 @@ def test_method_add(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.genai.agents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -167,7 +167,7 @@ def test_method_add_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.child_agents.with_raw_response.add( + response = client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -180,7 +180,7 @@ def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.child_agents.with_streaming_response.add( + with client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -198,13 +198,13 @@ def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None: with 
pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.genai.agents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.genai.agents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,7 +212,7 @@ def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.genai.agents.child_agents.view( + child_agent = client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -220,7 +220,7 @@ def test_method_view(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.child_agents.with_raw_response.view( + response = client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -232,7 +232,7 @@ def test_raw_response_view(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.child_agents.with_streaming_response.view( + with client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_streaming_response_view(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_view(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.agents.child_agents.with_raw_response.view( + client.agents.child_agents.with_raw_response.view( "", ) @@ -258,7 +258,7 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -267,7 +267,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -281,7 +281,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.child_agents.with_raw_response.update( + response = await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -294,7 +294,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK 
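# A minimal usage sketch of the accessor change these hunks exercise: agent
# sub-resources move from the nested `client.genai.agents.*` path up to
# `client.agents.*`. Assumes the generated client reads its API key from the
# environment; the UUID argument is a placeholder, as in the tests.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

# Before this patch: client.genai.agents.child_agents.view("parent-agent-uuid")
# After this patch, the same endpoint is reached one level up:
child_agent = client.agents.child_agents.view("parent-agent-uuid")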
@pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.child_agents.with_streaming_response.update( + async with async_client.agents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -312,13 +312,13 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.genai.agents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.genai.agents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -326,7 +326,7 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.delete( + child_agent = await async_client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -335,7 +335,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.child_agents.with_raw_response.delete( + response = await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -348,7 +348,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.child_agents.with_streaming_response.delete( + async with async_client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -364,13 +364,13 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.genai.agents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.genai.agents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -378,7 +378,7 @@ async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -387,7 +387,7 @@ async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize async def test_method_add_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -400,7 +400,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncDigitaloceanG @pytest.mark.skip() @parametrize async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.child_agents.with_raw_response.add( + response = await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -413,7 +413,7 @@ async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) - @pytest.mark.skip() @parametrize async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.child_agents.with_streaming_response.add( + async with async_client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -431,13 +431,13 @@ async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.genai.agents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.genai.agents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -445,7 +445,7 @@ async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize async def test_method_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.genai.agents.child_agents.view( + child_agent = await async_client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -453,7 +453,7 @@ async def test_method_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize async def test_raw_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.child_agents.with_raw_response.view( + response = await async_client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -465,7 +465,7 @@ async def test_raw_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with 
async_client.genai.agents.child_agents.with_streaming_response.view( + async with async_client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -480,6 +480,6 @@ async def test_streaming_response_view(self, async_client: AsyncDigitaloceanGena @parametrize async def test_path_params_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.agents.child_agents.with_raw_response.view( + await async_client.agents.child_agents.with_raw_response.view( "", ) diff --git a/tests/api_resources/genai/agents/test_functions.py b/tests/api_resources/agents/test_functions.py similarity index 85% rename from tests/api_resources/genai/agents/test_functions.py rename to tests/api_resources/agents/test_functions.py index c2e470df..d66590ba 100644 --- a/tests/api_resources/genai/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, @@ -24,7 +24,7 @@ class TestFunctions: @pytest.mark.skip() @parametrize def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - function = client.genai.agents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - function = client.genai.agents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.functions.with_raw_response.create( + response = client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.functions.with_streaming_response.create( + with client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -74,14 +74,14 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.genai.agents.functions.with_raw_response.create( + client.agents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - function = client.genai.agents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ 
-90,7 +90,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - function = client.genai.agents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -107,7 +107,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.functions.with_raw_response.update( + response = client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -120,7 +120,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.functions.with_streaming_response.update( + with client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.genai.agents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.genai.agents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -150,7 +150,7 @@ def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - function = client.genai.agents.functions.delete( + function = client.agents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -159,7 +159,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.functions.with_raw_response.delete( + response = client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.functions.with_streaming_response.delete( + with client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -188,13 +188,13 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.genai.agents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.genai.agents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) @@ -206,7 +206,7 @@ class TestAsyncFunctions: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.genai.agents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -214,7 +214,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.genai.agents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -229,7 +229,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.functions.with_raw_response.create( + response = await async_client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -241,7 +241,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.functions.with_streaming_response.create( + async with async_client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -256,14 +256,14 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.genai.agents.functions.with_raw_response.create( + await async_client.agents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.genai.agents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -272,7 +272,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.genai.agents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -289,7 +289,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.functions.with_raw_response.update( + response = await 
async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.functions.with_streaming_response.update( + async with async_client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -318,13 +318,13 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.genai.agents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.genai.agents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.genai.agents.functions.delete( + function = await async_client.agents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -341,7 +341,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.functions.with_raw_response.delete( + response = await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -354,7 +354,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.functions.with_streaming_response.delete( + async with async_client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -370,13 +370,13 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.genai.agents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.genai.agents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/genai/agents/test_knowledge_bases.py 
b/tests/api_resources/agents/test_knowledge_bases.py similarity index 82% rename from tests/api_resources/genai/agents/test_knowledge_bases.py rename to tests/api_resources/agents/test_knowledge_bases.py index ac7d97d1..b313b1af 100644 --- a/tests/api_resources/genai/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from digitalocean_genai_sdk.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -20,7 +20,7 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_attach(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.agents.knowledge_bases.attach( + knowledge_base = client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -28,7 +28,7 @@ def test_method_attach(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.knowledge_bases.with_raw_response.attach( + response = client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -40,7 +40,7 @@ def test_raw_response_attach(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.knowledge_bases.with_streaming_response.attach( + with client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -55,14 +55,14 @@ def test_streaming_response_attach(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_attach(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.genai.agents.knowledge_bases.with_raw_response.attach( + client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize def test_method_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.agents.knowledge_bases.attach_single( + knowledge_base = client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -71,7 +71,7 @@ def test_method_attach_single(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.knowledge_bases.with_raw_response.attach_single( + response = client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.knowledge_bases.with_streaming_response.attach_single( + with client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -100,13 +100,13 
@@ def test_streaming_response_attach_single(self, client: DigitaloceanGenaiSDK) -> @parametrize def test_path_params_attach_single(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.genai.agents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.genai.agents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -114,7 +114,7 @@ def test_path_params_attach_single(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_detach(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.agents.knowledge_bases.detach( + knowledge_base = client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -123,7 +123,7 @@ def test_method_detach(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_detach(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.knowledge_bases.with_raw_response.detach( + response = client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -136,7 +136,7 @@ def test_raw_response_detach(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_detach(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.knowledge_bases.with_streaming_response.detach( + with client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -152,13 +152,13 @@ def test_streaming_response_detach(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_detach(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.genai.agents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.genai.agents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -170,7 +170,7 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.agents.knowledge_bases.attach( + knowledge_base = await async_client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -178,7 +178,7 @@ async def test_method_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.knowledge_bases.with_raw_response.attach( + response = await 
async_client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -190,7 +190,7 @@ async def test_raw_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.knowledge_bases.with_streaming_response.attach( + async with async_client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -205,14 +205,14 @@ async def test_streaming_response_attach(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.genai.agents.knowledge_bases.with_raw_response.attach( + await async_client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize async def test_method_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.agents.knowledge_bases.attach_single( + knowledge_base = await async_client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -221,7 +221,7 @@ async def test_method_attach_single(self, async_client: AsyncDigitaloceanGenaiSD @pytest.mark.skip() @parametrize async def test_raw_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -234,7 +234,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncDigitaloceanG @pytest.mark.skip() @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.knowledge_bases.with_streaming_response.attach_single( + async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -250,13 +250,13 @@ async def test_streaming_response_attach_single(self, async_client: AsyncDigital @parametrize async def test_path_params_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.genai.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -264,7 +264,7 @@ async def test_path_params_attach_single(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.agents.knowledge_bases.detach( + knowledge_base = await 
async_client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -273,7 +273,7 @@ async def test_method_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.knowledge_bases.with_raw_response.detach( + response = await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -286,7 +286,7 @@ async def test_raw_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.knowledge_bases.with_streaming_response.detach( + async with async_client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -302,13 +302,13 @@ async def test_streaming_response_detach(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.genai.agents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.genai.agents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/genai/agents/test_versions.py b/tests/api_resources/agents/test_versions.py similarity index 83% rename from tests/api_resources/genai/agents/test_versions.py rename to tests/api_resources/agents/test_versions.py index d954927d..94f02d8c 100644 --- a/tests/api_resources/genai/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai.agents import ( +from digitalocean_genai_sdk.types.agents import ( VersionListResponse, VersionUpdateResponse, ) @@ -23,7 +23,7 @@ class TestVersions: @pytest.mark.skip() @parametrize def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - version = client.genai.agents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -31,7 +31,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - version = client.genai.agents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -41,7 +41,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.versions.with_raw_response.update( + response = 
client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -53,7 +53,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.versions.with_streaming_response.update( + with client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.genai.agents.versions.with_raw_response.update( + client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - version = client.genai.agents.versions.list( + version = client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -83,7 +83,7 @@ def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - version = client.genai.agents.versions.list( + version = client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -93,7 +93,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -105,7 +105,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.versions.with_streaming_response.list( + with client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -120,7 +120,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.agents.versions.with_raw_response.list( + client.agents.versions.with_raw_response.list( uuid="", ) @@ -131,7 +131,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - version = await async_client.genai.agents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -139,7 +139,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - version = await async_client.genai.agents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -149,7 +149,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.versions.with_raw_response.update( + response = await async_client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -161,7 +161,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.versions.with_streaming_response.update( + async with async_client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -176,14 +176,14 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.genai.agents.versions.with_raw_response.update( + await async_client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - version = await async_client.genai.agents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - version = await async_client.genai.agents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -201,7 +201,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.versions.with_raw_response.list( + response = await async_client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -213,7 +213,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.versions.with_streaming_response.list( + async with async_client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -228,6 +228,6 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @parametrize async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.agents.versions.with_raw_response.list( + await async_client.agents.versions.with_raw_response.list( uuid="", ) diff --git a/tests/api_resources/genai/agents/__init__.py b/tests/api_resources/api_keys/__init__.py similarity index 100% rename from tests/api_resources/genai/agents/__init__.py rename to tests/api_resources/api_keys/__init__.py diff --git a/tests/api_resources/genai/models/test_api_keys.py b/tests/api_resources/api_keys/test_api_keys_.py similarity index 82% rename from tests/api_resources/genai/models/test_api_keys.py rename to 
tests/api_resources/api_keys/test_api_keys_.py index afc9caa3..0ae74d6b 100644 --- a/tests/api_resources/genai/models/test_api_keys.py +++ b/tests/api_resources/api_keys/test_api_keys_.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai.models import ( +from digitalocean_genai_sdk.types.api_keys import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,13 +26,13 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.create() + api_key = client.api_keys.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.create( + api_key = client.api_keys.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.api_keys.with_raw_response.create() + response = client.api_keys.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.api_keys.with_streaming_response.create() as response: + with client.api_keys.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.update( + api_key = client.api_keys.api_keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -70,7 +70,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.update( + api_key = client.api_keys.api_keys.update( path_api_key_uuid="api_key_uuid", body_api_key_uuid="api_key_uuid", name="name", @@ -80,7 +80,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.api_keys.with_raw_response.update( + response = client.api_keys.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -92,7 +92,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.api_keys.with_streaming_response.update( + with client.api_keys.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as 
response: assert not response.is_closed @@ -107,20 +107,20 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.genai.models.api_keys.with_raw_response.update( + client.api_keys.api_keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.list() + api_key = client.api_keys.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.list( + api_key = client.api_keys.api_keys.list( page=0, per_page=0, ) @@ -129,7 +129,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.api_keys.with_raw_response.list() + response = client.api_keys.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -139,7 +139,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.api_keys.with_streaming_response.list() as response: + with client.api_keys.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -151,7 +151,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.genai.models.api_keys.delete( + api_key = client.api_keys.api_keys.delete( "api_key_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -159,7 +159,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.api_keys.with_raw_response.delete( + response = client.api_keys.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -171,7 +171,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.api_keys.with_streaming_response.delete( + with client.api_keys.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -186,14 +186,14 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.genai.models.api_keys.with_raw_response.delete( + client.api_keys.api_keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - api_key = 
client.genai.models.api_keys.update_regenerate( + api_key = client.api_keys.api_keys.update_regenerate( "api_key_uuid", ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -201,7 +201,7 @@ def test_method_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.api_keys.with_raw_response.update_regenerate( + response = client.api_keys.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize def test_streaming_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.api_keys.with_streaming_response.update_regenerate( + with client.api_keys.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_regenerate(self, client: DigitaloceanGenaiSDK @parametrize def test_path_params_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.genai.models.api_keys.with_raw_response.update_regenerate( + client.api_keys.api_keys.with_raw_response.update_regenerate( "", ) @@ -239,13 +239,13 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.genai.models.api_keys.create() + api_key = await async_client.api_keys.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.genai.models.api_keys.create( + api_key = await async_client.api_keys.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -253,7 +253,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.models.api_keys.with_raw_response.create() + response = await async_client.api_keys.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -263,7 +263,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.models.api_keys.with_streaming_response.create() as response: + async with async_client.api_keys.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -275,7 +275,7 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.genai.models.api_keys.update( + api_key = await async_client.api_keys.api_keys.update( path_api_key_uuid="api_key_uuid", ) 
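# A minimal sketch of the two response-wrapper styles the surrounding tests
# assert on, assuming standard Stainless-generated client behavior and
# environment-based auth; `response.parse()` is an assumed helper not shown
# in these hunks.
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()

    # Raw variant: the wrapper exposes transport details (the tests check
    # `http_request` headers and `is_closed`) alongside the parsed model.
    response = await client.api_keys.api_keys.with_raw_response.update(
        path_api_key_uuid="api_key_uuid",  # placeholder value, as in the tests
    )
    assert response.is_closed is True
    assert response.http_request.headers.get("X-Stainless-Lang") == "python"
    api_key = response.parse()  # assumed to yield an APIKeyUpdateResponse
    print(api_key)

    # Streaming variant: the body stays unread inside the context and the
    # connection is closed on exit, mirroring the `async with` tests above.
    async with client.api_keys.api_keys.with_streaming_response.update(
        path_api_key_uuid="api_key_uuid",
    ) as stream:
        assert not stream.is_closed


asyncio.run(main())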
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
@@ -283,7 +283,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.models.api_keys.update(
+        api_key = await async_client.api_keys.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             body_api_key_uuid="api_key_uuid",
             name="name",
@@ -293,7 +293,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.models.api_keys.with_raw_response.update(
+        response = await async_client.api_keys.api_keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
         )
 
@@ -305,7 +305,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.models.api_keys.with_streaming_response.update(
+        async with async_client.api_keys.api_keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -320,20 +320,20 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            await async_client.genai.models.api_keys.with_raw_response.update(
+            await async_client.api_keys.api_keys.with_raw_response.update(
                 path_api_key_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.models.api_keys.list()
+        api_key = await async_client.api_keys.api_keys.list()
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.models.api_keys.list(
+        api_key = await async_client.api_keys.api_keys.list(
             page=0,
             per_page=0,
         )
@@ -342,7 +342,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.models.api_keys.with_raw_response.list()
+        response = await async_client.api_keys.api_keys.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -352,7 +352,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.models.api_keys.with_streaming_response.list() as response:
+        async with async_client.api_keys.api_keys.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -364,7 +364,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
     @pytest.mark.skip()
     @parametrize
     async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.models.api_keys.delete(
+        api_key = await async_client.api_keys.api_keys.delete(
             "api_key_uuid",
         )
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
@@ -372,7 +372,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.models.api_keys.with_raw_response.delete(
+        response = await async_client.api_keys.api_keys.with_raw_response.delete(
             "api_key_uuid",
         )
 
@@ -384,7 +384,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.models.api_keys.with_streaming_response.delete(
+        async with async_client.api_keys.api_keys.with_streaming_response.delete(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -399,14 +399,14 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.models.api_keys.with_raw_response.delete(
+            await async_client.api_keys.api_keys.with_raw_response.delete(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.genai.models.api_keys.update_regenerate(
+        api_key = await async_client.api_keys.api_keys.update_regenerate(
             "api_key_uuid",
         )
         assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
@@ -414,7 +414,7 @@ async def test_method_update_regenerate(self, async_client: AsyncDigitaloceanGen
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.models.api_keys.with_raw_response.update_regenerate(
+        response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate(
             "api_key_uuid",
         )
 
@@ -426,7 +426,7 @@ async def test_raw_response_update_regenerate(self, async_client: AsyncDigitaloc
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.models.api_keys.with_streaming_response.update_regenerate(
+        async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -441,6 +441,6 @@ async def test_streaming_response_update_regenerate(self, async_client: AsyncDig
     @parametrize
     async def test_path_params_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.models.api_keys.with_raw_response.update_regenerate(
+            await async_client.api_keys.api_keys.with_raw_response.update_regenerate(
                 "",
             )
diff --git a/tests/api_resources/genai/auth/__init__.py b/tests/api_resources/auth/__init__.py
similarity index 100%
rename from tests/api_resources/genai/auth/__init__.py
rename to tests/api_resources/auth/__init__.py
diff --git a/tests/api_resources/genai/auth/agents/__init__.py b/tests/api_resources/auth/agents/__init__.py
similarity index 100%
rename from tests/api_resources/genai/auth/agents/__init__.py
rename to tests/api_resources/auth/agents/__init__.py
diff --git a/tests/api_resources/genai/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py
similarity index 83%
rename from tests/api_resources/genai/auth/agents/test_token.py
rename to tests/api_resources/auth/agents/test_token.py
index 04cdd452..1e505ccd 100644
--- a/tests/api_resources/genai/auth/agents/test_token.py
+++ b/tests/api_resources/auth/agents/test_token.py
@@ -9,7 +9,7 @@
 
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.genai.auth.agents import TokenCreateResponse
+from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -20,7 +20,7 @@ class TestToken:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        token = client.genai.auth.agents.token.create(
+        token = client.auth.agents.token.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(TokenCreateResponse, token, path=["response"])
@@ -28,7 +28,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        token = client.genai.auth.agents.token.create(
+        token = client.auth.agents.token.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
         )
@@ -37,7 +37,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.auth.agents.token.with_raw_response.create(
+        response = client.auth.agents.token.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
 
@@ -49,7 +49,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.auth.agents.token.with_streaming_response.create(
+        with client.auth.agents.token.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.genai.auth.agents.token.with_raw_response.create(
+            client.auth.agents.token.with_raw_response.create(
                 path_agent_uuid="",
             )
 
@@ -75,7 +75,7 @@ class TestAsyncToken:
     @pytest.mark.skip()
     @parametrize
     async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        token = await async_client.genai.auth.agents.token.create(
+        token = await async_client.auth.agents.token.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(TokenCreateResponse, token, path=["response"])
@@ -83,7 +83,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        token = await async_client.genai.auth.agents.token.create(
+        token = await async_client.auth.agents.token.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
         )
@@ -92,7 +92,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.auth.agents.token.with_raw_response.create(
+        response = await async_client.auth.agents.token.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
 
@@ -104,7 +104,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.auth.agents.token.with_streaming_response.create(
+        async with async_client.auth.agents.token.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -119,6 +119,6 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.genai.auth.agents.token.with_raw_response.create(
+            await async_client.auth.agents.token.with_raw_response.create(
                 path_agent_uuid="",
             )
diff --git a/tests/api_resources/genai/providers/openai/__init__.py b/tests/api_resources/genai/providers/openai/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/genai/providers/openai/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/genai/knowledge_bases/__init__.py b/tests/api_resources/knowledge_bases/__init__.py
similarity index 100%
rename from tests/api_resources/genai/knowledge_bases/__init__.py
rename to tests/api_resources/knowledge_bases/__init__.py
diff --git a/tests/api_resources/genai/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
similarity index 83%
rename from tests/api_resources/genai/knowledge_bases/test_data_sources.py
rename to tests/api_resources/knowledge_bases/test_data_sources.py
index b51c7e0c..68fd67e5 100644
--- a/tests/api_resources/genai/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -9,7 +9,7 @@
 
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.genai.knowledge_bases import (
+from digitalocean_genai_sdk.types.knowledge_bases import (
     DataSourceListResponse,
     DataSourceCreateResponse,
     DataSourceDeleteResponse,
@@ -24,7 +24,7 @@ class TestDataSources:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.genai.knowledge_bases.data_sources.create(
+        data_source = client.knowledge_bases.data_sources.create(
             path_knowledge_base_uuid="knowledge_base_uuid",
         )
         assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
@@ -32,7 +32,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.genai.knowledge_bases.data_sources.create(
+        data_source = client.knowledge_bases.data_sources.create(
             path_knowledge_base_uuid="knowledge_base_uuid",
             aws_data_source={
                 "bucket_name": "bucket_name",
@@ -58,7 +58,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.knowledge_bases.data_sources.with_raw_response.create(
+        response = client.knowledge_bases.data_sources.with_raw_response.create(
             path_knowledge_base_uuid="knowledge_base_uuid",
         )
 
@@ -70,7 +70,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.knowledge_bases.data_sources.with_streaming_response.create(
+        with client.knowledge_bases.data_sources.with_streaming_response.create(
             path_knowledge_base_uuid="knowledge_base_uuid",
         ) as response:
             assert not response.is_closed
@@ -87,14 +87,14 @@ def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
         ):
-            client.genai.knowledge_bases.data_sources.with_raw_response.create(
+            client.knowledge_bases.data_sources.with_raw_response.create(
                 path_knowledge_base_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.genai.knowledge_bases.data_sources.list(
+        data_source = client.knowledge_bases.data_sources.list(
             knowledge_base_uuid="knowledge_base_uuid",
         )
         assert_matches_type(DataSourceListResponse, data_source, path=["response"])
@@ -102,7 +102,7 @@ def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.genai.knowledge_bases.data_sources.list(
+        data_source = client.knowledge_bases.data_sources.list(
             knowledge_base_uuid="knowledge_base_uuid",
             page=0,
             per_page=0,
@@ -112,7 +112,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.knowledge_bases.data_sources.with_raw_response.list(
+        response = client.knowledge_bases.data_sources.with_raw_response.list(
             knowledge_base_uuid="knowledge_base_uuid",
         )
 
@@ -124,7 +124,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.knowledge_bases.data_sources.with_streaming_response.list(
+        with client.knowledge_bases.data_sources.with_streaming_response.list(
             knowledge_base_uuid="knowledge_base_uuid",
         ) as response:
             assert not response.is_closed
@@ -139,14 +139,14 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.genai.knowledge_bases.data_sources.with_raw_response.list(
+            client.knowledge_bases.data_sources.with_raw_response.list(
                 knowledge_base_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.genai.knowledge_bases.data_sources.delete(
+        data_source = client.knowledge_bases.data_sources.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -155,7 +155,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+        response = client.knowledge_bases.data_sources.with_raw_response.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -168,7 +168,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.knowledge_bases.data_sources.with_streaming_response.delete(
+        with client.knowledge_bases.data_sources.with_streaming_response.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         ) as response:
@@ -184,13 +184,13 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+            client.knowledge_bases.data_sources.with_raw_response.delete(
                 data_source_uuid="data_source_uuid",
                 knowledge_base_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
-            client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+            client.knowledge_bases.data_sources.with_raw_response.delete(
data_source_uuid="", knowledge_base_uuid="knowledge_base_uuid", ) @@ -202,7 +202,7 @@ class TestAsyncDataSources: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.genai.knowledge_bases.data_sources.create( + data_source = await async_client.knowledge_bases.data_sources.create( path_knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -210,7 +210,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.genai.knowledge_bases.data_sources.create( + data_source = await async_client.knowledge_bases.data_sources.create( path_knowledge_base_uuid="knowledge_base_uuid", aws_data_source={ "bucket_name": "bucket_name", @@ -236,7 +236,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.create( + response = await async_client.knowledge_bases.data_sources.with_raw_response.create( path_knowledge_base_uuid="knowledge_base_uuid", ) @@ -248,7 +248,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.create( + async with async_client.knowledge_bases.data_sources.with_streaming_response.create( path_knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed @@ -265,14 +265,14 @@ async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" ): - await async_client.genai.knowledge_bases.data_sources.with_raw_response.create( + await async_client.knowledge_bases.data_sources.with_raw_response.create( path_knowledge_base_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.genai.knowledge_bases.data_sources.list( + data_source = await async_client.knowledge_bases.data_sources.list( knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -280,7 +280,7 @@ async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.genai.knowledge_bases.data_sources.list( + data_source = await async_client.knowledge_bases.data_sources.list( knowledge_base_uuid="knowledge_base_uuid", page=0, per_page=0, @@ -290,7 +290,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.list( + response = await 
+        response = await async_client.knowledge_bases.data_sources.with_raw_response.list(
             knowledge_base_uuid="knowledge_base_uuid",
         )
 
@@ -302,7 +302,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.list(
+        async with async_client.knowledge_bases.data_sources.with_streaming_response.list(
             knowledge_base_uuid="knowledge_base_uuid",
         ) as response:
             assert not response.is_closed
@@ -317,14 +317,14 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
     @parametrize
     async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            await async_client.genai.knowledge_bases.data_sources.with_raw_response.list(
+            await async_client.knowledge_bases.data_sources.with_raw_response.list(
                 knowledge_base_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        data_source = await async_client.genai.knowledge_bases.data_sources.delete(
+        data_source = await async_client.knowledge_bases.data_sources.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -333,7 +333,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+        response = await async_client.knowledge_bases.data_sources.with_raw_response.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -346,7 +346,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.knowledge_bases.data_sources.with_streaming_response.delete(
+        async with async_client.knowledge_bases.data_sources.with_streaming_response.delete(
             data_source_uuid="data_source_uuid",
             knowledge_base_uuid="knowledge_base_uuid",
         ) as response:
@@ -362,13 +362,13 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+            await async_client.knowledge_bases.data_sources.with_raw_response.delete(
                 data_source_uuid="data_source_uuid",
                 knowledge_base_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
-            await async_client.genai.knowledge_bases.data_sources.with_raw_response.delete(
+            await async_client.knowledge_bases.data_sources.with_raw_response.delete(
                 data_source_uuid="",
                 knowledge_base_uuid="knowledge_base_uuid",
             )
diff --git a/tests/api_resources/genai/models/__init__.py b/tests/api_resources/providers/__init__.py
similarity index 100%
rename from tests/api_resources/genai/models/__init__.py
rename to tests/api_resources/providers/__init__.py
diff --git a/tests/api_resources/genai/providers/__init__.py b/tests/api_resources/providers/anthropic/__init__.py
similarity index 100%
rename from tests/api_resources/genai/providers/__init__.py
rename to tests/api_resources/providers/anthropic/__init__.py
diff --git a/tests/api_resources/genai/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py
similarity index 81%
rename from tests/api_resources/genai/providers/anthropic/test_keys.py
rename to tests/api_resources/providers/anthropic/test_keys.py
index f83ec64b..c5491bd4 100644
--- a/tests/api_resources/genai/providers/anthropic/test_keys.py
+++ b/tests/api_resources/providers/anthropic/test_keys.py
@@ -9,7 +9,7 @@
 
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.genai.providers.anthropic import (
+from digitalocean_genai_sdk.types.providers.anthropic import (
     KeyListResponse,
     KeyCreateResponse,
     KeyDeleteResponse,
@@ -27,13 +27,13 @@ class TestKeys:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.create()
+        key = client.providers.anthropic.keys.create()
         assert_matches_type(KeyCreateResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.create(
+        key = client.providers.anthropic.keys.create(
            api_key="api_key",
            name="name",
        )
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.anthropic.keys.with_raw_response.create()
+        response = client.providers.anthropic.keys.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,7 +52,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.anthropic.keys.with_streaming_response.create() as response:
+        with client.providers.anthropic.keys.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.retrieve(
+        key = client.providers.anthropic.keys.retrieve(
             "api_key_uuid",
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -72,7 +72,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.anthropic.keys.with_raw_response.retrieve(
+        response = client.providers.anthropic.keys.with_raw_response.retrieve(
             "api_key_uuid",
         )
 
@@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.anthropic.keys.with_streaming_response.retrieve(
+        with client.providers.anthropic.keys.with_streaming_response.retrieve(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None
     @parametrize
     def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.genai.providers.anthropic.keys.with_raw_response.retrieve(
+            client.providers.anthropic.keys.with_raw_response.retrieve(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.update(
+        key = client.providers.anthropic.keys.update(
             path_api_key_uuid="api_key_uuid",
         )
         assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -114,7 +114,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.update(
+        key = client.providers.anthropic.keys.update(
             path_api_key_uuid="api_key_uuid",
             api_key="api_key",
             body_api_key_uuid="api_key_uuid",
@@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.anthropic.keys.with_raw_response.update(
+        response = client.providers.anthropic.keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
         )
 
@@ -137,7 +137,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.anthropic.keys.with_streaming_response.update(
+        with client.providers.anthropic.keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.genai.providers.anthropic.keys.with_raw_response.update(
+            client.providers.anthropic.keys.with_raw_response.update(
                 path_api_key_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.list()
+        key = client.providers.anthropic.keys.list()
         assert_matches_type(KeyListResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.anthropic.keys.list(
+        key = client.providers.anthropic.keys.list(
             page=0,
             per_page=0,
         )
@@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.anthropic.keys.with_raw_response.list()
+        response = client.providers.anthropic.keys.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
"python" @@ -184,7 +184,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.providers.anthropic.keys.with_streaming_response.list() as response: + with client.providers.anthropic.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - key = client.genai.providers.anthropic.keys.delete( + key = client.providers.anthropic.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -204,7 +204,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.providers.anthropic.keys.with_raw_response.delete( + response = client.providers.anthropic.keys.with_raw_response.delete( "api_key_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.providers.anthropic.keys.with_streaming_response.delete( + with client.providers.anthropic.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -231,14 +231,14 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.genai.providers.anthropic.keys.with_raw_response.delete( + client.providers.anthropic.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - key = client.genai.providers.anthropic.keys.list_agents( + key = client.providers.anthropic.keys.list_agents( uuid="uuid", ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) @@ -246,7 +246,7 @@ def test_method_list_agents(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_list_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.genai.providers.anthropic.keys.list_agents( + key = client.providers.anthropic.keys.list_agents( uuid="uuid", page=0, per_page=0, @@ -256,7 +256,7 @@ def test_method_list_agents_with_all_params(self, client: DigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize def test_raw_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.providers.anthropic.keys.with_raw_response.list_agents( + response = client.providers.anthropic.keys.with_raw_response.list_agents( uuid="uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.providers.anthropic.keys.with_streaming_response.list_agents( + with client.providers.anthropic.keys.with_streaming_response.list_agents( uuid="uuid", ) as response: assert not response.is_closed @@ 
@@ -283,7 +283,7 @@ def test_streaming_response_list_agents(self, client: DigitaloceanGenaiSDK) -> N
     @parametrize
     def test_path_params_list_agents(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.genai.providers.anthropic.keys.with_raw_response.list_agents(
+            client.providers.anthropic.keys.with_raw_response.list_agents(
                 uuid="",
             )
 
@@ -294,13 +294,13 @@ class TestAsyncKeys:
     @pytest.mark.skip()
     @parametrize
     async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.create()
+        key = await async_client.providers.anthropic.keys.create()
         assert_matches_type(KeyCreateResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.create(
+        key = await async_client.providers.anthropic.keys.create(
             api_key="api_key",
             name="name",
         )
@@ -309,7 +309,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.create()
+        response = await async_client.providers.anthropic.keys.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -319,7 +319,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.create() as response:
+        async with async_client.providers.anthropic.keys.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -331,7 +331,7 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe
     @pytest.mark.skip()
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.retrieve(
+        key = await async_client.providers.anthropic.keys.retrieve(
             "api_key_uuid",
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -339,7 +339,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) ->
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.retrieve(
+        response = await async_client.providers.anthropic.keys.with_raw_response.retrieve(
             "api_key_uuid",
         )
 
@@ -351,7 +351,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.retrieve(
+        async with async_client.providers.anthropic.keys.with_streaming_response.retrieve(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -366,14 +366,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.providers.anthropic.keys.with_raw_response.retrieve(
+            await async_client.providers.anthropic.keys.with_raw_response.retrieve(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.update(
+        key = await async_client.providers.anthropic.keys.update(
             path_api_key_uuid="api_key_uuid",
         )
         assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -381,7 +381,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.update(
+        key = await async_client.providers.anthropic.keys.update(
             path_api_key_uuid="api_key_uuid",
             api_key="api_key",
             body_api_key_uuid="api_key_uuid",
@@ -392,7 +392,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.update(
+        response = await async_client.providers.anthropic.keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
         )
 
@@ -404,7 +404,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.update(
+        async with async_client.providers.anthropic.keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -419,20 +419,20 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            await async_client.genai.providers.anthropic.keys.with_raw_response.update(
+            await async_client.providers.anthropic.keys.with_raw_response.update(
                 path_api_key_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.list()
+        key = await async_client.providers.anthropic.keys.list()
         assert_matches_type(KeyListResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.list(
+        key = await async_client.providers.anthropic.keys.list(
             page=0,
             per_page=0,
         )
@@ -441,7 +441,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.list()
+        response = await async_client.providers.anthropic.keys.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -451,7 +451,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.list() as response:
+        async with async_client.providers.anthropic.keys.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -463,7 +463,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
     @pytest.mark.skip()
     @parametrize
     async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.delete(
+        key = await async_client.providers.anthropic.keys.delete(
             "api_key_uuid",
         )
         assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -471,7 +471,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.delete(
+        response = await async_client.providers.anthropic.keys.with_raw_response.delete(
             "api_key_uuid",
         )
 
@@ -483,7 +483,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.delete(
+        async with async_client.providers.anthropic.keys.with_streaming_response.delete(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -498,14 +498,14 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.genai.providers.anthropic.keys.with_raw_response.delete(
+            await async_client.providers.anthropic.keys.with_raw_response.delete(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.list_agents(
+        key = await async_client.providers.anthropic.keys.list_agents(
             uuid="uuid",
         )
         assert_matches_type(KeyListAgentsResponse, key, path=["response"])
@@ -513,7 +513,7 @@ async def test_method_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK)
     @pytest.mark.skip()
     @parametrize
     async def test_method_list_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.genai.providers.anthropic.keys.list_agents(
+        key = await async_client.providers.anthropic.keys.list_agents(
             uuid="uuid",
             page=0,
             per_page=0,
@@ -523,7 +523,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncDigit
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.genai.providers.anthropic.keys.with_raw_response.list_agents(
+        response = await async_client.providers.anthropic.keys.with_raw_response.list_agents(
             uuid="uuid",
         )
 
@@ -535,7 +535,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncDigitaloceanGen
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.genai.providers.anthropic.keys.with_streaming_response.list_agents(
+        async with async_client.providers.anthropic.keys.with_streaming_response.list_agents(
             uuid="uuid",
         ) as response:
             assert not response.is_closed
@@ -550,6 +550,6 @@ async def test_streaming_response_list_agents(self, async_client: AsyncDigitaloc
     @parametrize
     async def test_path_params_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.genai.providers.anthropic.keys.with_raw_response.list_agents(
+            await async_client.providers.anthropic.keys.with_raw_response.list_agents(
                 uuid="",
             )
diff --git a/tests/api_resources/genai/providers/anthropic/__init__.py b/tests/api_resources/providers/openai/__init__.py
similarity index 100%
rename from tests/api_resources/genai/providers/anthropic/__init__.py
rename to tests/api_resources/providers/openai/__init__.py
diff --git a/tests/api_resources/genai/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py
similarity index 81%
rename from tests/api_resources/genai/providers/openai/test_keys.py
rename to tests/api_resources/providers/openai/test_keys.py
index 572292bc..b88b6a5f 100644
--- a/tests/api_resources/genai/providers/openai/test_keys.py
+++ b/tests/api_resources/providers/openai/test_keys.py
@@ -9,7 +9,7 @@
 
 from tests.utils import assert_matches_type
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.genai.providers.openai import (
+from digitalocean_genai_sdk.types.providers.openai import (
     KeyListResponse,
     KeyCreateResponse,
     KeyDeleteResponse,
@@ -27,13 +27,13 @@ class TestKeys:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.create()
+        key = client.providers.openai.keys.create()
         assert_matches_type(KeyCreateResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.create(
+        key = client.providers.openai.keys.create(
             api_key="api_key",
             name="name",
         )
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.openai.keys.with_raw_response.create()
+        response = client.providers.openai.keys.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,7 +52,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.openai.keys.with_streaming_response.create() as response:
+        with client.providers.openai.keys.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.retrieve(
+        key = client.providers.openai.keys.retrieve(
             "api_key_uuid",
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -72,7 +72,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.openai.keys.with_raw_response.retrieve(
+        response = client.providers.openai.keys.with_raw_response.retrieve(
             "api_key_uuid",
         )
 
@@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.openai.keys.with_streaming_response.retrieve(
+        with client.providers.openai.keys.with_streaming_response.retrieve(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None
     @parametrize
     def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.genai.providers.openai.keys.with_raw_response.retrieve(
+            client.providers.openai.keys.with_raw_response.retrieve(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.update(
+        key = client.providers.openai.keys.update(
             path_api_key_uuid="api_key_uuid",
         )
         assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -114,7 +114,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.update(
+        key = client.providers.openai.keys.update(
             path_api_key_uuid="api_key_uuid",
             api_key="api_key",
             body_api_key_uuid="api_key_uuid",
@@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.openai.keys.with_raw_response.update(
+        response = client.providers.openai.keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
         )
 
@@ -137,7 +137,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.openai.keys.with_streaming_response.update(
+        with client.providers.openai.keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.genai.providers.openai.keys.with_raw_response.update(
+            client.providers.openai.keys.with_raw_response.update(
                 path_api_key_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.list()
+        key = client.providers.openai.keys.list()
         assert_matches_type(KeyListResponse, key, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.list(
+        key = client.providers.openai.keys.list(
             page=0,
             per_page=0,
         )
@@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.openai.keys.with_raw_response.list()
+        response = client.providers.openai.keys.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -184,7 +184,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.openai.keys.with_streaming_response.list() as response:
+        with client.providers.openai.keys.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
@@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.delete(
+        key = client.providers.openai.keys.delete(
             "api_key_uuid",
         )
         assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -204,7 +204,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.genai.providers.openai.keys.with_raw_response.delete(
+        response = client.providers.openai.keys.with_raw_response.delete(
             "api_key_uuid",
         )
 
@@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.genai.providers.openai.keys.with_streaming_response.delete(
+        with client.providers.openai.keys.with_streaming_response.delete(
             "api_key_uuid",
         ) as response:
             assert not response.is_closed
@@ -231,14 +231,14 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
     @parametrize
     def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.genai.providers.openai.keys.with_raw_response.delete(
+            client.providers.openai.keys.with_raw_response.delete(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.retrieve_agents(
+        key = client.providers.openai.keys.retrieve_agents(
             uuid="uuid",
         )
         assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
@@ -246,7 +246,7 @@ def test_method_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.genai.providers.openai.keys.retrieve_agents(
client.providers.openai.keys.retrieve_agents( uuid="uuid", page=0, per_page=0, @@ -256,7 +256,7 @@ def test_method_retrieve_agents_with_all_params(self, client: DigitaloceanGenaiS @pytest.mark.skip() @parametrize def test_raw_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + response = client.providers.openai.keys.with_raw_response.retrieve_agents( uuid="uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.providers.openai.keys.with_streaming_response.retrieve_agents( + with client.providers.openai.keys.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -283,7 +283,7 @@ def test_streaming_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) @parametrize def test_path_params_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + client.providers.openai.keys.with_raw_response.retrieve_agents( uuid="", ) @@ -294,13 +294,13 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.create() + key = await async_client.providers.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.create( + key = await async_client.providers.openai.keys.create( api_key="api_key", name="name", ) @@ -309,7 +309,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.create() + response = await async_client.providers.openai.keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -319,7 +319,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.create() as response: + async with async_client.providers.openai.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -331,7 +331,7 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.retrieve( + key = await async_client.providers.openai.keys.retrieve( "api_key_uuid", ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -339,7 +339,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> 
@pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.retrieve( + response = await async_client.providers.openai.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -351,7 +351,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.retrieve( + async with async_client.providers.openai.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed @@ -366,14 +366,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @parametrize async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.genai.providers.openai.keys.with_raw_response.retrieve( + await async_client.providers.openai.keys.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.update( + key = await async_client.providers.openai.keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) @@ -381,7 +381,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.update( + key = await async_client.providers.openai.keys.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", @@ -392,7 +392,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.update( + response = await async_client.providers.openai.keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -404,7 +404,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.update( + async with async_client.providers.openai.keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -419,20 +419,20 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.genai.providers.openai.keys.with_raw_response.update( + await async_client.providers.openai.keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await 
async_client.genai.providers.openai.keys.list() + key = await async_client.providers.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.list( + key = await async_client.providers.openai.keys.list( page=0, per_page=0, ) @@ -441,7 +441,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.list() + response = await async_client.providers.openai.keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -451,7 +451,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.list() as response: + async with async_client.providers.openai.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -463,7 +463,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.delete( + key = await async_client.providers.openai.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -471,7 +471,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.delete( + response = await async_client.providers.openai.keys.with_raw_response.delete( "api_key_uuid", ) @@ -483,7 +483,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.delete( + async with async_client.providers.openai.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -498,14 +498,14 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.genai.providers.openai.keys.with_raw_response.delete( + await async_client.providers.openai.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.retrieve_agents( + key = await async_client.providers.openai.keys.retrieve_agents( uuid="uuid", ) assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) @@ 
-513,7 +513,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncDigitaloceanGenai @pytest.mark.skip() @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.genai.providers.openai.keys.retrieve_agents( + key = await async_client.providers.openai.keys.retrieve_agents( uuid="uuid", page=0, per_page=0, @@ -523,7 +523,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncD @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( uuid="uuid", ) @@ -535,7 +535,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncDigitalocea @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.providers.openai.keys.with_streaming_response.retrieve_agents( + async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -550,6 +550,6 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncDigit @parametrize async def test_path_params_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.providers.openai.keys.with_raw_response.retrieve_agents( + await async_client.providers.openai.keys.with_raw_response.retrieve_agents( uuid="", ) diff --git a/tests/api_resources/genai/test_agents.py b/tests/api_resources/test_agents.py similarity index 85% rename from tests/api_resources/genai/test_agents.py rename to tests/api_resources/test_agents.py index e7e57f99..3aafae23 100644 --- a/tests/api_resources/genai/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, @@ -27,13 +27,13 @@ class TestAgents: @pytest.mark.skip() @parametrize def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.create() + agent = client.agents.create() assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.create( + agent = client.agents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -50,7 +50,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.create() + response = client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -60,7 +60,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def 
test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.create() as response: + with client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -72,7 +72,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.retrieve( + agent = client.agents.retrieve( "uuid", ) assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @@ -80,7 +80,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.retrieve( + response = client.agents.with_raw_response.retrieve( "uuid", ) @@ -92,7 +92,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.retrieve( + with client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -107,14 +107,14 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None @parametrize def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.agents.with_raw_response.retrieve( + client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.update( + agent = client.agents.update( path_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -122,7 +122,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.update( + agent = client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -145,7 +145,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.update( + response = client.agents.with_raw_response.update( path_uuid="uuid", ) @@ -157,7 +157,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.update( + with client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -172,20 +172,20 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.genai.agents.with_raw_response.update( + client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def 
test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.list() + agent = client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.list( + agent = client.agents.list( only_deployed=True, page=0, per_page=0, @@ -195,7 +195,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.list() + response = client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -205,7 +205,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.list() as response: + with client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -217,7 +217,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.delete( + agent = client.agents.delete( "uuid", ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @@ -225,7 +225,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.delete( + response = client.agents.with_raw_response.delete( "uuid", ) @@ -237,7 +237,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.delete( + with client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed @@ -252,14 +252,14 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.agents.with_raw_response.delete( + client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_status(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.update_status( + agent = client.agents.update_status( path_uuid="uuid", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -267,7 +267,7 @@ def test_method_update_status(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_status_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.genai.agents.update_status( + agent = client.agents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", @@ -277,7 +277,7 @@ def test_method_update_status_with_all_params(self, client: DigitaloceanGenaiSDK @pytest.mark.skip() @parametrize def test_raw_response_update_status(self, client: 
DigitaloceanGenaiSDK) -> None: - response = client.genai.agents.with_raw_response.update_status( + response = client.agents.with_raw_response.update_status( path_uuid="uuid", ) @@ -289,7 +289,7 @@ def test_raw_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.agents.with_streaming_response.update_status( + with client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed @@ -304,7 +304,7 @@ def test_streaming_response_update_status(self, client: DigitaloceanGenaiSDK) -> @parametrize def test_path_params_update_status(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.genai.agents.with_raw_response.update_status( + client.agents.with_raw_response.update_status( path_uuid="", ) @@ -315,13 +315,13 @@ class TestAsyncAgents: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.create() + agent = await async_client.agents.create() assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.create( + agent = await async_client.agents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -338,7 +338,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.create() + response = await async_client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -348,7 +348,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.create() as response: + async with async_client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -360,7 +360,7 @@ async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.retrieve( + agent = await async_client.agents.retrieve( "uuid", ) assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @@ -368,7 +368,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.retrieve( + response = await async_client.agents.with_raw_response.retrieve( "uuid", ) @@ -380,7 +380,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize async def 
test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.retrieve( + async with async_client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -395,14 +395,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @parametrize async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.agents.with_raw_response.retrieve( + await async_client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.update( + agent = await async_client.agents.update( path_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -410,7 +410,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.update( + agent = await async_client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -433,7 +433,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.update( + response = await async_client.agents.with_raw_response.update( path_uuid="uuid", ) @@ -445,7 +445,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.update( + async with async_client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -460,20 +460,20 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.genai.agents.with_raw_response.update( + await async_client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.list() + agent = await async_client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.list( + agent = await async_client.agents.list( only_deployed=True, page=0, per_page=0, @@ -483,7 +483,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.list() + response = await 
async_client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -493,7 +493,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.list() as response: + async with async_client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -505,7 +505,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.delete( + agent = await async_client.agents.delete( "uuid", ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @@ -513,7 +513,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.delete( + response = await async_client.agents.with_raw_response.delete( "uuid", ) @@ -525,7 +525,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.delete( + async with async_client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed @@ -540,14 +540,14 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.agents.with_raw_response.delete( + await async_client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -555,7 +555,7 @@ async def test_method_update_status(self, async_client: AsyncDigitaloceanGenaiSD @pytest.mark.skip() @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.genai.agents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", @@ -565,7 +565,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncDig @pytest.mark.skip() @parametrize async def test_raw_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.agents.with_raw_response.update_status( + response = await async_client.agents.with_raw_response.update_status( path_uuid="uuid", ) @@ -577,7 +577,7 @@ async def test_raw_response_update_status(self, async_client: AsyncDigitaloceanG @pytest.mark.skip() @parametrize async def 
test_streaming_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.agents.with_streaming_response.update_status( + async with async_client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed @@ -592,6 +592,6 @@ async def test_streaming_response_update_status(self, async_client: AsyncDigital @parametrize async def test_path_params_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.genai.agents.with_raw_response.update_status( + await async_client.agents.with_raw_response.update_status( path_uuid="", ) diff --git a/tests/api_resources/genai/test_models.py b/tests/api_resources/test_api_keys.py similarity index 64% rename from tests/api_resources/genai/test_models.py rename to tests/api_resources/test_api_keys.py index a5e9091e..198eb261 100644 --- a/tests/api_resources/genai/test_models.py +++ b/tests/api_resources/test_api_keys.py @@ -9,92 +9,92 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai import ModelListResponse +from digitalocean_genai_sdk.types import APIKeyListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestModels: +class TestAPIKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - model = client.genai.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = client.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - model = client.genai.models.list( + api_key = client.api_keys.list( page=0, per_page=0, public_only=True, usecases=["MODEL_USECASE_UNKNOWN"], ) - assert_matches_type(ModelListResponse, model, path=["response"]) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.models.with_raw_response.list() + response = client.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.models.with_streaming_response.list() as response: + with client.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) assert cast(Any, response.is_closed) is True -class TestAsyncModels: +class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", 
"strict"]) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - model = await async_client.genai.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = await async_client.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - model = await async_client.genai.models.list( + api_key = await async_client.api_keys.list( page=0, per_page=0, public_only=True, usecases=["MODEL_USECASE_UNKNOWN"], ) - assert_matches_type(ModelListResponse, model, path=["response"]) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.models.with_raw_response.list() + response = await async_client.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.models.with_streaming_response.list() as response: + async with async_client.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_genai.py b/tests/api_resources/test_genai.py deleted file mode 100644 index 6a6c5d93..00000000 --- a/tests/api_resources/test_genai.py +++ /dev/null @@ -1,96 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import GenaiRetrieveRegionsResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestGenai: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: - genai = client.genai.retrieve_regions() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_regions_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - genai = client.genai.retrieve_regions( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.with_raw_response.retrieve_regions() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - genai = response.parse() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_regions(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.with_streaming_response.retrieve_regions() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - genai = response.parse() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncGenai: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - genai = await async_client.genai.retrieve_regions() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_regions_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - genai = await async_client.genai.retrieve_regions( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.with_raw_response.retrieve_regions() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - genai = await response.parse() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_regions(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.with_streaming_response.retrieve_regions() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - genai = await response.parse() - assert_matches_type(GenaiRetrieveRegionsResponse, genai, 
path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/genai/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py similarity index 82% rename from tests/api_resources/genai/test_indexing_jobs.py rename to tests/api_resources/test_indexing_jobs.py index 89d9295b..9ae7ec50 100644 --- a/tests/api_resources/genai/test_indexing_jobs.py +++ b/tests/api_resources/test_indexing_jobs.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -26,13 +26,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.create() + indexing_job = client.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.create( + indexing_job = client.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.indexing_jobs.with_raw_response.create() + response = client.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.indexing_jobs.with_streaming_response.create() as response: + with client.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.retrieve( + indexing_job = client.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.indexing_jobs.with_raw_response.retrieve( + response = client.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.indexing_jobs.with_streaming_response.retrieve( + with client.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None @parametrize def test_path_params_retrieve(self, 
client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.indexing_jobs.with_raw_response.retrieve( + client.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.list() + indexing_job = client.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.list( + indexing_job = client.indexing_jobs.list( page=0, per_page=0, ) @@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.indexing_jobs.with_raw_response.list() + response = client.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -130,7 +130,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.indexing_jobs.with_streaming_response.list() as response: + with client.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.retrieve_data_sources( + indexing_job = client.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize def test_raw_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + response = client.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.indexing_jobs.with_streaming_response.retrieve_data_sources( + with client.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: DigitaloceanGena @parametrize def test_path_params_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + client.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize def test_method_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = 
client.genai.indexing_jobs.update_cancel( + indexing_job = client.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_cancel_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.genai.indexing_jobs.update_cancel( + indexing_job = client.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: DigitaloceanGenaiSDK @pytest.mark.skip() @parametrize def test_raw_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.indexing_jobs.with_raw_response.update_cancel( + response = client.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.indexing_jobs.with_streaming_response.update_cancel( + with client.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> @parametrize def test_path_params_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.genai.indexing_jobs.with_raw_response.update_cancel( + client.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) @@ -239,13 +239,13 @@ class TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.create() + indexing_job = await async_client.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.create( + indexing_job = await async_client.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -254,7 +254,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.indexing_jobs.with_raw_response.create() + response = await async_client.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -264,7 +264,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.indexing_jobs.with_streaming_response.create() as response: + async with async_client.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,7 +276,7 @@ async def 
test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.retrieve( + indexing_job = await async_client.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -284,7 +284,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.indexing_jobs.with_raw_response.retrieve( + response = await async_client.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -296,7 +296,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.indexing_jobs.with_streaming_response.retrieve( + async with async_client.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -311,20 +311,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @parametrize async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.indexing_jobs.with_raw_response.retrieve( + await async_client.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.list() + indexing_job = await async_client.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.list( + indexing_job = await async_client.indexing_jobs.list( page=0, per_page=0, ) @@ -333,7 +333,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.indexing_jobs.with_raw_response.list() + response = await async_client.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -343,7 +343,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.indexing_jobs.with_streaming_response.list() as response: + async with async_client.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -355,7 +355,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @pytest.mark.skip() @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await 
async_client.genai.indexing_jobs.retrieve_data_sources( + indexing_job = await async_client.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -363,7 +363,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncDigitalocea @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -375,7 +375,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncDigit @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.indexing_jobs.with_streaming_response.retrieve_data_sources( + async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -390,14 +390,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @parametrize async def test_path_params_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.genai.indexing_jobs.with_raw_response.retrieve_data_sources( + await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize async def test_method_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.update_cancel( + indexing_job = await async_client.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -405,7 +405,7 @@ async def test_method_update_cancel(self, async_client: AsyncDigitaloceanGenaiSD @pytest.mark.skip() @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.genai.indexing_jobs.update_cancel( + indexing_job = await async_client.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -414,7 +414,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncDig @pytest.mark.skip() @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.indexing_jobs.with_raw_response.update_cancel( + response = await async_client.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -426,7 +426,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncDigitaloceanG @pytest.mark.skip() @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.indexing_jobs.with_streaming_response.update_cancel( + async with async_client.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -441,6 +441,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncDigital @parametrize async def test_path_params_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.genai.indexing_jobs.with_raw_response.update_cancel( + await async_client.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) diff --git a/tests/api_resources/genai/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py similarity index 84% rename from tests/api_resources/genai/test_knowledge_bases.py rename to tests/api_resources/test_knowledge_bases.py index 33e2a99b..34e3d753 100644 --- a/tests/api_resources/genai/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.genai import ( +from digitalocean_genai_sdk.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, @@ -26,13 +26,13 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.create() + knowledge_base = client.knowledge_bases.create() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.create( + knowledge_base = client.knowledge_bases.create( database_id="database_id", datasources=[ { @@ -68,7 +68,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.knowledge_bases.with_raw_response.create() + response = client.knowledge_bases.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -78,7 +78,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.knowledge_bases.with_streaming_response.create() as response: + with client.knowledge_bases.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -90,7 +90,7 @@ def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.retrieve( + knowledge_base = client.knowledge_bases.retrieve( "uuid", ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) @@ -98,7 +98,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.knowledge_bases.with_raw_response.retrieve( + response = client.knowledge_bases.with_raw_response.retrieve( "uuid", ) @@ -110,7 +110,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.knowledge_bases.with_streaming_response.retrieve( + with 
client.knowledge_bases.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -125,14 +125,14 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None @parametrize def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.knowledge_bases.with_raw_response.retrieve( + client.knowledge_bases.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.update( + knowledge_base = client.knowledge_bases.update( path_uuid="uuid", ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -140,7 +140,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.update( + knowledge_base = client.knowledge_bases.update( path_uuid="uuid", database_id="database_id", embedding_model_uuid="embedding_model_uuid", @@ -154,7 +154,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.knowledge_bases.with_raw_response.update( + response = client.knowledge_bases.with_raw_response.update( path_uuid="uuid", ) @@ -166,7 +166,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.knowledge_bases.with_streaming_response.update( + with client.knowledge_bases.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -181,20 +181,20 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.genai.knowledge_bases.with_raw_response.update( + client.knowledge_bases.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.list() + knowledge_base = client.knowledge_bases.list() assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.list( + knowledge_base = client.knowledge_bases.list( page=0, per_page=0, ) @@ -203,7 +203,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.knowledge_bases.with_raw_response.list() + response = client.knowledge_bases.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -213,7 +213,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: 
DigitaloceanGenaiSDK) -> None: - with client.genai.knowledge_bases.with_streaming_response.list() as response: + with client.knowledge_bases.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -225,7 +225,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.genai.knowledge_bases.delete( + knowledge_base = client.knowledge_bases.delete( "uuid", ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) @@ -233,7 +233,7 @@ def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.genai.knowledge_bases.with_raw_response.delete( + response = client.knowledge_bases.with_raw_response.delete( "uuid", ) @@ -245,7 +245,7 @@ def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.genai.knowledge_bases.with_streaming_response.delete( + with client.knowledge_bases.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed @@ -260,7 +260,7 @@ def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: @parametrize def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.genai.knowledge_bases.with_raw_response.delete( + client.knowledge_bases.with_raw_response.delete( "", ) @@ -271,13 +271,13 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.create() + knowledge_base = await async_client.knowledge_bases.create() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.create( + knowledge_base = await async_client.knowledge_bases.create( database_id="database_id", datasources=[ { @@ -313,7 +313,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.with_raw_response.create() + response = await async_client.knowledge_bases.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -323,7 +323,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.with_streaming_response.create() as response: + async with async_client.knowledge_bases.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -335,7 +335,7 @@ async def 
test_streaming_response_create(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.retrieve( + knowledge_base = await async_client.knowledge_bases.retrieve( "uuid", ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) @@ -343,7 +343,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.with_raw_response.retrieve( + response = await async_client.knowledge_bases.with_raw_response.retrieve( "uuid", ) @@ -355,7 +355,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.with_streaming_response.retrieve( + async with async_client.knowledge_bases.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -370,14 +370,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @parametrize async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.knowledge_bases.with_raw_response.retrieve( + await async_client.knowledge_bases.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.update( + knowledge_base = await async_client.knowledge_bases.update( path_uuid="uuid", ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -385,7 +385,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.update( + knowledge_base = await async_client.knowledge_bases.update( path_uuid="uuid", database_id="database_id", embedding_model_uuid="embedding_model_uuid", @@ -399,7 +399,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.with_raw_response.update( + response = await async_client.knowledge_bases.with_raw_response.update( path_uuid="uuid", ) @@ -411,7 +411,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.with_streaming_response.update( + async with async_client.knowledge_bases.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -426,20 +426,20 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_update(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.genai.knowledge_bases.with_raw_response.update( + await async_client.knowledge_bases.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.list() + knowledge_base = await async_client.knowledge_bases.list() assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.list( + knowledge_base = await async_client.knowledge_bases.list( page=0, per_page=0, ) @@ -448,7 +448,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.with_raw_response.list() + response = await async_client.knowledge_bases.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -458,7 +458,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.with_streaming_response.list() as response: + async with async_client.knowledge_bases.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -470,7 +470,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.genai.knowledge_bases.delete( + knowledge_base = await async_client.knowledge_bases.delete( "uuid", ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) @@ -478,7 +478,7 @@ async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.genai.knowledge_bases.with_raw_response.delete( + response = await async_client.knowledge_bases.with_raw_response.delete( "uuid", ) @@ -490,7 +490,7 @@ async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.genai.knowledge_bases.with_streaming_response.delete( + async with async_client.knowledge_bases.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed @@ -505,6 +505,6 @@ async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGe @parametrize async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.genai.knowledge_bases.with_raw_response.delete( + await 
async_client.knowledge_bases.with_raw_response.delete( "", ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py new file mode 100644 index 00000000..f36b6c63 --- /dev/null +++ b/tests/api_resources/test_regions.py @@ -0,0 +1,96 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from digitalocean_genai_sdk.types import RegionListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRegions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + region = client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + region = client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + response = client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + with client.regions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncRegions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + region = await async_client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + region = await async_client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + response = await async_client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = await response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async with async_client.regions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + + region = await response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/test_client.py b/tests/test_client.py index 72994fee..d6412ded 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -778,7 +778,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.genai.agents.versions.with_raw_response.list(uuid="uuid") + response = client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -802,7 +802,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.genai.agents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -827,7 +827,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.genai.agents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) @@ -1584,7 +1584,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.genai.agents.versions.with_raw_response.list(uuid="uuid") + response = await client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1609,7 +1609,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.genai.agents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -1635,7 +1635,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.genai.agents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) From fcf206550fd60d6d1f044669ac061ff5a91a922a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 16:12:19 +0000 Subject: [PATCH 013/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 6 +++--- src/digitalocean_genai_sdk/types/__init__.py | 3 +++ src/digitalocean_genai_sdk/types/agents/__init__.py | 1 - .../types/agents/api_key_create_response.py | 2 +- .../types/agents/api_key_delete_response.py | 2 +- .../types/agents/api_key_list_response.py | 2 +- .../types/agents/api_key_regenerate_response.py | 2 +- .../types/agents/api_key_update_response.py | 2 +- src/digitalocean_genai_sdk/types/api_agent.py | 6 +++--- .../types/{agents => }/api_agent_api_key_info.py | 2 +- .../{providers/anthropic => }/api_anthropic_api_key_info.py | 2 +- 
.../types/{providers/openai => }/api_openai_api_key_info.py | 4 ++-- .../types/providers/anthropic/__init__.py | 1 - .../types/providers/anthropic/key_create_response.py | 2 +- .../types/providers/anthropic/key_delete_response.py | 2 +- .../types/providers/anthropic/key_list_response.py | 2 +- .../types/providers/anthropic/key_retrieve_response.py | 2 +- .../types/providers/anthropic/key_update_response.py | 2 +- .../types/providers/openai/__init__.py | 1 - .../types/providers/openai/key_create_response.py | 2 +- .../types/providers/openai/key_delete_response.py | 2 +- .../types/providers/openai/key_list_response.py | 2 +- .../types/providers/openai/key_retrieve_response.py | 2 +- .../types/providers/openai/key_update_response.py | 2 +- 25 files changed, 29 insertions(+), 29 deletions(-) rename src/digitalocean_genai_sdk/types/{agents => }/api_agent_api_key_info.py (93%) rename src/digitalocean_genai_sdk/types/{providers/anthropic => }/api_anthropic_api_key_info.py (93%) rename src/digitalocean_genai_sdk/types/{providers/openai => }/api_openai_api_key_info.py (88%) diff --git a/.stats.yml b/.stats.yml index 0c4b9912..ceea5658 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 25c073783b334ca5170ad34fa03ed57f +config_hash: 6a4a54166267ede5a1822b47ec8c540f diff --git a/api.md b/api.md index 5457215d..32dbe7df 100644 --- a/api.md +++ b/api.md @@ -5,8 +5,11 @@ Types: ```python from digitalocean_genai_sdk.types import ( APIAgent, + APIAgentAPIKeyInfo, + APIAnthropicAPIKeyInfo, APIDeploymentVisibility, APIModel, + APIOpenAIAPIKeyInfo, APIRetrievalMethod, AgentCreateResponse, AgentRetrieveResponse, @@ -32,7 +35,6 @@ Types: ```python from digitalocean_genai_sdk.types.agents import ( - APIAgentAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -132,7 +134,6 @@ Types: ```python from digitalocean_genai_sdk.types.providers.anthropic import ( - APIAnthropicAPIKeyInfo, KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -159,7 +160,6 @@ Types: ```python from digitalocean_genai_sdk.types.providers.openai import ( - APIOpenAIAPIKeyInfo, KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py index 1f81739b..ee516f83 100644 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ b/src/digitalocean_genai_sdk/types/__init__.py @@ -22,12 +22,15 @@ from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse from .agent_update_status_params 
import AgentUpdateStatusParams as AgentUpdateStatusParams +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams diff --git a/src/digitalocean_genai_sdk/types/agents/__init__.py b/src/digitalocean_genai_sdk/types/agents/__init__.py index b9f85c2d..aae0ee6b 100644 --- a/src/digitalocean_genai_sdk/types/agents/__init__.py +++ b/src/digitalocean_genai_sdk/types/agents/__init__.py @@ -11,7 +11,6 @@ from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams -from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams from .function_create_params import FunctionCreateParams as FunctionCreateParams from .function_update_params import FunctionUpdateParams as FunctionUpdateParams diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py index 608927cd..09689fe7 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py +++ b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py @@ -3,7 +3,7 @@ from typing import Optional from ..._models import BaseModel -from .api_agent_api_key_info import APIAgentAPIKeyInfo +from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py index e114f0d4..02b03f61 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py +++ b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py @@ -3,7 +3,7 @@ from typing import Optional from ..._models import BaseModel -from .api_agent_api_key_info import APIAgentAPIKeyInfo +from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py index 14c0d15a..eff98649 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py +++ b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py @@ -5,7 +5,7 @@ from .api_meta import APIMeta from ..._models import BaseModel from .api_links import APILinks -from .api_agent_api_key_info import APIAgentAPIKeyInfo +from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py index 80b8869a..ea2f761e 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py +++ b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py @@ -3,7 +3,7 @@ from typing import Optional from ..._models import BaseModel -from .api_agent_api_key_info import APIAgentAPIKeyInfo +from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyRegenerateResponse"] diff --git 
a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py index a79c4a36..87442329 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py +++ b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py @@ -3,7 +3,7 @@ from typing import Optional from ..._models import BaseModel -from .api_agent_api_key_info import APIAgentAPIKeyInfo +from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git a/src/digitalocean_genai_sdk/types/api_agent.py b/src/digitalocean_genai_sdk/types/api_agent.py index cba97a42..d6e18ca2 100644 --- a/src/digitalocean_genai_sdk/types/api_agent.py +++ b/src/digitalocean_genai_sdk/types/api_agent.py @@ -10,10 +10,10 @@ from .api_model import APIModel from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod +from .api_agent_api_key_info import APIAgentAPIKeyInfo +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo from .api_deployment_visibility import APIDeploymentVisibility -from .agents.api_agent_api_key_info import APIAgentAPIKeyInfo -from .providers.openai.api_openai_api_key_info import APIOpenAIAPIKeyInfo -from .providers.anthropic.api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = [ "APIAgent", diff --git a/src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py b/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py similarity index 93% rename from src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py rename to src/digitalocean_genai_sdk/types/api_agent_api_key_info.py index 46e9388b..8dc71564 100644 --- a/src/digitalocean_genai_sdk/types/agents/api_agent_api_key_info.py +++ b/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["APIAgentAPIKeyInfo"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py b/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py similarity index 93% rename from src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py rename to src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py index 8459c11c..e2e04a8a 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/api_anthropic_api_key_info.py +++ b/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from ...._models import BaseModel +from .._models import BaseModel __all__ = ["APIAnthropicAPIKeyInfo"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py b/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py similarity index 88% rename from src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py rename to src/digitalocean_genai_sdk/types/api_openai_api_key_info.py index 9bf7082f..39328f80 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/api_openai_api_key_info.py +++ b/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py @@ -3,8 +3,8 @@ from typing import List, Optional from datetime import datetime -from ...._models import BaseModel -from ...api_model import APIModel +from .._models import BaseModel +from .api_model import APIModel __all__ = 
["APIOpenAIAPIKeyInfo"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py index 019eab26..eb47e709 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py @@ -12,4 +12,3 @@ from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py index 5968aee0..a032810c 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyCreateResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py index d9467f40..2afe2dda 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyDeleteResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py index 25c2895c..d0b84e96 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py @@ -5,7 +5,7 @@ from ...._models import BaseModel from ...agents.api_meta import APIMeta from ...agents.api_links import APILinks -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py index 3b185df8..b8361fc2 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyRetrieveResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py index a73dabbd..b04277a6 100644 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py +++ b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py @@ -3,7 +3,7 @@ from typing import 
Optional from ...._models import BaseModel -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyUpdateResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py b/src/digitalocean_genai_sdk/types/providers/openai/__init__.py index 46746a06..70abf332 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/__init__.py @@ -10,6 +10,5 @@ from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse from .key_update_response import KeyUpdateResponse as KeyUpdateResponse from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py index 37dc65a9..f3b4d36c 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyCreateResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py index 7f07a584..0c8922bb 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyDeleteResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py index b5f5884b..c263cba3 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py @@ -5,7 +5,7 @@ from ...._models import BaseModel from ...agents.api_meta import APIMeta from ...agents.api_links import APILinks -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py index 03cd8573..7015b6f7 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyRetrieveResponse"] diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py 
index f14b65ce..4889f994 100644 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py +++ b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py @@ -3,7 +3,7 @@ from typing import Optional from ...._models import BaseModel -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyUpdateResponse"] From c41a148d55311e7f92e1e209f3487e17191136dd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:43:52 +0000 Subject: [PATCH 014/200] chore: update SDK settings --- .github/workflows/publish-pypi.yml | 31 +++++++++ .github/workflows/release-doctor.yml | 21 ++++++ .release-please-manifest.json | 3 + .stats.yml | 2 +- CONTRIBUTING.md | 4 +- README.md | 10 +-- bin/check-release-environment | 21 ++++++ pyproject.toml | 6 +- release-please-config.json | 66 +++++++++++++++++++ src/digitalocean_genai_sdk/_version.py | 2 +- .../resources/agents/agents.py | 8 +-- .../resources/agents/api_keys.py | 8 +-- .../resources/agents/child_agents.py | 8 +-- .../resources/agents/functions.py | 8 +-- .../resources/agents/knowledge_bases.py | 8 +-- .../resources/agents/versions.py | 8 +-- .../resources/api_keys/api_keys.py | 8 +-- .../resources/api_keys/api_keys_.py | 8 +-- .../resources/auth/agents/agents.py | 8 +-- .../resources/auth/agents/token.py | 8 +-- .../resources/auth/auth.py | 8 +-- src/digitalocean_genai_sdk/resources/chat.py | 8 +-- .../resources/embeddings.py | 8 +-- .../resources/indexing_jobs.py | 8 +-- .../resources/knowledge_bases/data_sources.py | 8 +-- .../knowledge_bases/knowledge_bases.py | 8 +-- .../resources/models.py | 8 +-- .../providers/anthropic/anthropic.py | 8 +-- .../resources/providers/anthropic/keys.py | 8 +-- .../resources/providers/openai/keys.py | 8 +-- .../resources/providers/openai/openai.py | 8 +-- .../resources/providers/providers.py | 8 +-- .../resources/regions.py | 8 +-- 33 files changed, 246 insertions(+), 104 deletions(-) create mode 100644 .github/workflows/publish-pypi.yml create mode 100644 .github/workflows/release-doctor.yml create mode 100644 .release-please-manifest.json create mode 100644 bin/check-release-environment create mode 100644 release-please-config.json diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 00000000..2bc5b4b2 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,31 @@ +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
+# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +name: Publish PyPI +on: + workflow_dispatch: + + release: + types: [published] + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml new file mode 100644 index 00000000..0f23cbc4 --- /dev/null +++ b/.github/workflows/release-doctor.yml @@ -0,0 +1,21 @@ +name: Release Doctor +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + release_doctor: + name: release doctor + runs-on: ubuntu-latest + if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + + steps: + - uses: actions/checkout@v4 + + - name: Check release environment + run: | + bash ./bin/check-release-environment + env: + PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000..c4762802 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.0.1-alpha.0" +} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index ceea5658..aefe5e55 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 6a4a54166267ede5a1822b47ec8c540f +config_hash: 70991672c5e8fe09c40fc600f422bc55 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4e0d206f..7d5d60a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git +$ pip install git+ssh://git@github.com/digitalocean/genai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. 
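A quick way to verify an install from the relocated repository is to import the package and print its version. This is a minimal sketch, not part of the patch; it assumes `__version__` is re-exported at the package level from `_version.py` (the file release-please stamps, per the `extra-files` entry later in this patch), which the diff does not show directly.

```python
# Minimal sketch: confirm which build is installed after `pip install`.
# Assumes the package re-exports `__version__` from `_version.py`.
import digitalocean_genai_sdk

print(digitalocean_genai_sdk.__version__)  # e.g. "0.0.1-alpha.0"
```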
### Publish manually diff --git a/README.md b/README.md index 81df1124..0e282738 100644 --- a/README.md +++ b/README.md @@ -15,8 +15,8 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ## Installation ```sh -# install from this staging repo -pip install git+ssh://git@github.com/stainless-sdks/digitalocean-genai-sdk-python.git +# install from the production repo +pip install git+ssh://git@github.com/digitalocean/genai-python.git ``` > [!NOTE] @@ -238,9 +238,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -346,7 +346,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment new file mode 100644 index 00000000..9e89a88a --- /dev/null +++ b/bin/check-release-environment @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +errors=() + +if [ -z "${PYPI_TOKEN}" ]; then + errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") +fi + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" 
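The README hunk above describes the `.with_raw_response` accessor. Here is a minimal sketch of that pattern, reusing the `agents.versions.list()` call from the snippet; client construction is an assumption (credentials read from the environment), since the patch does not show it.

```python
# Sketch of the `.with_raw_response` pattern from the README hunk above.
# Assumes the client picks up credentials from the environment. Note that
# resources now hang directly off the client (client.agents...), with no
# `genai` namespace in between, per the renames earlier in this series.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

response = client.agents.versions.with_raw_response.list(uuid="uuid")
print(response.http_request.headers.get("X-Stainless-Lang"))  # "python"

version = response.parse()  # the object `agents.versions.list()` would return
print(version.agent_versions)
```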
diff --git a/pyproject.toml b/pyproject.toml index 73fa240b..7dc7018a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" -Repository = "https://github.com/stainless-sdks/digitalocean-genai-sdk-python" +Homepage = "https://github.com/digitalocean/genai-python" +Repository = "https://github.com/digitalocean/genai-python" [tool.rye] @@ -121,7 +121,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/stainless-sdks/digitalocean-genai-sdk-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..234b9475 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,66 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "python", + "extra-files": [ + "src/digitalocean_genai_sdk/_version.py" + ] +} \ No newline at end of file diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index 5c4fa53a..bb83d491 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "digitalocean_genai_sdk" -__version__ = "0.0.1-alpha.0" +__version__ = "0.0.1-alpha.0" # x-release-please-version diff --git a/src/digitalocean_genai_sdk/resources/agents/agents.py b/src/digitalocean_genai_sdk/resources/agents/agents.py index 2c0a11ed..6d3ce525 100644 --- a/src/digitalocean_genai_sdk/resources/agents/agents.py +++ b/src/digitalocean_genai_sdk/resources/agents/agents.py @@ -104,7 +104,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -460,7 +460,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -469,7 +469,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/agents/api_keys.py b/src/digitalocean_genai_sdk/resources/agents/api_keys.py index bf4adb26..451f5cb5 100644 --- a/src/digitalocean_genai_sdk/resources/agents/api_keys.py +++ b/src/digitalocean_genai_sdk/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -268,7 +268,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -277,7 +277,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/agents/child_agents.py b/src/digitalocean_genai_sdk/resources/agents/child_agents.py index ad2427f9..7d4ed3bb 100644 --- a/src/digitalocean_genai_sdk/resources/agents/child_agents.py +++ b/src/digitalocean_genai_sdk/resources/agents/child_agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChildAgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChildAgentsResourceWithStreamingResponse(self) @@ -237,7 +237,7 @@ def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChildAgentsResourceWithRawResponse(self) @@ -246,7 +246,7 @@ def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChildAgentsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/agents/functions.py b/src/digitalocean_genai_sdk/resources/agents/functions.py index a1025806..89f9efa3 100644 --- a/src/digitalocean_genai_sdk/resources/agents/functions.py +++ b/src/digitalocean_genai_sdk/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -199,7 +199,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -208,7 +208,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py index ba190305..4a091446 100644 --- a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py +++ b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -160,7 +160,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -169,7 +169,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/agents/versions.py b/src/digitalocean_genai_sdk/resources/agents/versions.py index af4597d1..e77a252b 100644 --- a/src/digitalocean_genai_sdk/resources/agents/versions.py +++ b/src/digitalocean_genai_sdk/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py index e55b3051..63091bcc 100644 --- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py +++ b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py @@ -36,7 +36,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -45,7 +45,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -135,7 +135,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -144,7 +144,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py index 3fc2f4f7..70b1147a 100644 --- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py +++ b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -242,7 +242,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -251,7 +251,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py b/src/digitalocean_genai_sdk/resources/auth/agents/agents.py index 3a5ba673..a0aa9faf 100644 --- a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py +++ b/src/digitalocean_genai_sdk/resources/auth/agents/agents.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/token.py b/src/digitalocean_genai_sdk/resources/auth/agents/token.py index 89caaf10..73ecef05 100644 --- a/src/digitalocean_genai_sdk/resources/auth/agents/token.py +++ b/src/digitalocean_genai_sdk/resources/auth/agents/token.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> TokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return TokenResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> TokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return TokenResourceWithStreamingResponse(self) @@ -85,7 +85,7 @@ def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncTokenResourceWithRawResponse(self) @@ -94,7 +94,7 @@ def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncTokenResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/auth/auth.py b/src/digitalocean_genai_sdk/resources/auth/auth.py index 854ac636..985fc56c 100644 --- a/src/digitalocean_genai_sdk/resources/auth/auth.py +++ b/src/digitalocean_genai_sdk/resources/auth/auth.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AuthResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AuthResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAuthResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAuthResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/chat.py b/src/digitalocean_genai_sdk/resources/chat.py index e193c696..518fbad8 100644 --- a/src/digitalocean_genai_sdk/resources/chat.py +++ b/src/digitalocean_genai_sdk/resources/chat.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -191,7 +191,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -200,7 +200,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/digitalocean_genai_sdk/resources/embeddings.py index 95146443..1bcd3145 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/digitalocean_genai_sdk/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/indexing_jobs.py b/src/digitalocean_genai_sdk/resources/indexing_jobs.py index e0ea9839..7649a7a7 100644 --- a/src/digitalocean_genai_sdk/resources/indexing_jobs.py +++ b/src/digitalocean_genai_sdk/resources/indexing_jobs.py @@ -34,7 +34,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -43,7 +43,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -250,7 +250,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -259,7 +259,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py index 2576eaeb..b8a29c4a 100644 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py +++ b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -196,7 +196,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -205,7 +205,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py index 87687615..713aca63 100644 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py +++ b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -312,7 +312,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -321,7 +321,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/digitalocean_genai_sdk/resources/models.py index 0fbf131b..81b75441 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/digitalocean_genai_sdk/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py index 39565bbf..64783563 100644 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py +++ b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py index 470e43ce..1f65a5ab 100644 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py +++ b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -303,7 +303,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -312,7 +312,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py index 4991d56c..06e7a23c 100644 --- a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py +++ b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -301,7 +301,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py index 89083056..d29fd062 100644 --- a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py +++ b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/providers/providers.py b/src/digitalocean_genai_sdk/resources/providers/providers.py index d0963ce6..50e3db1a 100644 --- a/src/digitalocean_genai_sdk/resources/providers/providers.py +++ b/src/digitalocean_genai_sdk/resources/providers/providers.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/regions.py b/src/digitalocean_genai_sdk/resources/regions.py index 5e06213b..d506688b 100644 --- a/src/digitalocean_genai_sdk/resources/regions.py +++ b/src/digitalocean_genai_sdk/resources/regions.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RegionsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RegionsResourceWithStreamingResponse(self) @@ -95,7 +95,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRegionsResourceWithRawResponse(self) @@ -104,7 +104,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/stainless-sdks/digitalocean-genai-sdk-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRegionsResourceWithStreamingResponse(self) From 662994a204e130a96c636872309a8184c229fa81 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:44:35 +0000 Subject: [PATCH 015/200] chore: update SDK settings --- .stats.yml | 2 +- README.md | 9 +++------ pyproject.toml | 2 +- requirements-dev.lock | 12 ++++++------ requirements.lock | 12 ++++++------ 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index aefe5e55..71357274 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 70991672c5e8fe09c40fc600f422bc55 +config_hash: 6971c3b2047437e125da9f34d60abd94 diff --git a/README.md b/README.md index 0e282738..91898189 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Digitalocean Genai SDK Python API library -[![PyPI version](https://img.shields.io/pypi/v/digitalocean_genai_sdk.svg)](https://pypi.org/project/digitalocean_genai_sdk/) +[![PyPI version](https://img.shields.io/pypi/v/genai-python.svg)](https://pypi.org/project/genai-python/) The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ application. 
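The `chore:`-prefixed commit titles in this series are what the `changelog-sections` configuration shown earlier keys on. A simplified sketch of that prefix-to-section mapping (real release-please also handles breaking-change markers, and the `test`/`ci` sections are configured as hidden):

```python
from typing import Optional

# Prefix-to-section mapping taken from release-please-config.json above;
# hidden sections (test, ci) are omitted since they don't render.
SECTIONS = {
    "feat": "Features",
    "fix": "Bug Fixes",
    "perf": "Performance Improvements",
    "revert": "Reverts",
    "chore": "Chores",
    "docs": "Documentation",
    "style": "Styles",
    "refactor": "Refactors",
    "build": "Build System",
}

def section_for(commit_title: str) -> Optional[str]:
    # Strip an optional scope such as `(internal)` before the colon.
    prefix = commit_title.split(":", 1)[0].split("(", 1)[0]
    return SECTIONS.get(prefix)

print(section_for("chore: update SDK settings"))     # -> Chores
print(section_for("chore(internal): version bump"))  # -> Chores
```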
The library includes type definitions for all request params and response fields, @@ -15,13 +15,10 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ## Installation ```sh -# install from the production repo -pip install git+ssh://git@github.com/digitalocean/genai-python.git +# install from PyPI +pip install --pre genai-python ``` -> [!NOTE] -> Once this package is [published to PyPI](https://app.stainless.com/docs/guides/publish), this will become: `pip install --pre digitalocean_genai_sdk` - ## Usage The full API of this library can be found in [api.md](api.md). diff --git a/pyproject.toml b/pyproject.toml index 7dc7018a..d70c40bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "digitalocean_genai_sdk" +name = "genai-python" version = "0.0.1-alpha.0" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index bf449af3..a8ab153d 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,7 +13,7 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via digitalocean-genai-sdk + # via genai-python # via httpx argcomplete==3.1.2 # via nox @@ -26,7 +26,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via digitalocean-genai-sdk + # via genai-python exceptiongroup==1.2.2 # via anyio # via pytest @@ -37,7 +37,7 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via digitalocean-genai-sdk + # via genai-python # via respx idna==3.4 # via anyio @@ -64,7 +64,7 @@ platformdirs==3.11.0 pluggy==1.5.0 # via pytest pydantic==2.10.3 - # via digitalocean-genai-sdk + # via genai-python pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -86,14 +86,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via digitalocean-genai-sdk + # via genai-python time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via digitalocean-genai-sdk + # via genai-python # via mypy # via pydantic # via pydantic-core diff --git a/requirements.lock b/requirements.lock index e655776d..a61bce5e 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,13 +13,13 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via digitalocean-genai-sdk + # via genai-python # via httpx certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via digitalocean-genai-sdk + # via genai-python exceptiongroup==1.2.2 # via anyio h11==0.14.0 @@ -27,19 +27,19 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via digitalocean-genai-sdk + # via genai-python idna==3.4 # via anyio # via httpx pydantic==2.10.3 - # via digitalocean-genai-sdk + # via genai-python pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via digitalocean-genai-sdk + # via genai-python typing-extensions==4.12.2 # via anyio - # via digitalocean-genai-sdk + # via genai-python # via pydantic # via pydantic-core From 74f783b94d0a2ad7479676e7ca3df3c0d9ba79cb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:47:03 +0000 Subject: [PATCH 016/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/digitalocean_genai_sdk/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c4762802..ba6c3483 100644 --- a/.release-please-manifest.json +++ 
b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.0.1-alpha.0" + ".": "0.1.0-alpha.1" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index d70c40bd..8fcdc580 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "genai-python" -version = "0.0.1-alpha.0" +version = "0.1.0-alpha.1" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index bb83d491..a788b3f9 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "digitalocean_genai_sdk" -__version__ = "0.0.1-alpha.0" # x-release-please-version +__version__ = "0.1.0-alpha.1" # x-release-please-version From b1eaf880a5d433d28076e9ccf1042da4314d2666 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:55:30 +0000 Subject: [PATCH 017/200] chore: update SDK settings --- .stats.yml | 2 +- README.md | 4 ++-- pyproject.toml | 2 +- requirements-dev.lock | 12 ++++++------ requirements.lock | 12 ++++++------ 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.stats.yml b/.stats.yml index 71357274..0c46095d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 6971c3b2047437e125da9f34d60abd94 +config_hash: a5e87f457b74bf2341470b4cc1c719d6 diff --git a/README.md b/README.md index 91898189..e47ef7a0 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Digitalocean Genai SDK Python API library -[![PyPI version](https://img.shields.io/pypi/v/genai-python.svg)](https://pypi.org/project/genai-python/) +[![PyPI version](https://img.shields.io/pypi/v/do-genai-python.svg)](https://pypi.org/project/do-genai-python/) The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ application. 
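The version bump above is fully mechanical: release-please tracks the current version in `.release-please-manifest.json` and rewrites any line in the configured `extra-files` that carries the `x-release-please-version` marker, which is why that comment was added to `_version.py` earlier in the series. A rough sketch of the marker-driven rewrite (an approximation for illustration, not release-please's actual implementation):

```python
import re

def bump_version_file(text: str, new_version: str) -> str:
    # Only lines annotated with the `# x-release-please-version` marker are
    # rewritten; unannotated version strings are left alone.
    pattern = r'(__version__ = ")[^"]+(")(\s*# x-release-please-version)'
    return re.sub(pattern, rf'\g<1>{new_version}\g<2>\g<3>', text)

src = '__version__ = "0.0.1-alpha.0" # x-release-please-version\n'
print(bump_version_file(src, "0.1.0-alpha.1"), end="")
# -> __version__ = "0.1.0-alpha.1" # x-release-please-version
```

Because the config also sets `"versioning": "prerelease"` with `"bump-minor-pre-major": true`, the `chore(internal): version bump` commits produce the 0.1.0-alpha.N sequence seen here rather than stable versions.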
The library includes type definitions for all request params and response fields, @@ -16,7 +16,7 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ```sh # install from PyPI -pip install --pre genai-python +pip install --pre do-genai-python ``` ## Usage diff --git a/pyproject.toml b/pyproject.toml index 8fcdc580..43bdf659 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "genai-python" +name = "do-genai-python" version = "0.1.0-alpha.1" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index a8ab153d..5b5ac1c0 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,7 +13,7 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via genai-python + # via do-genai-python # via httpx argcomplete==3.1.2 # via nox @@ -26,7 +26,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via genai-python + # via do-genai-python exceptiongroup==1.2.2 # via anyio # via pytest @@ -37,7 +37,7 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via genai-python + # via do-genai-python # via respx idna==3.4 # via anyio @@ -64,7 +64,7 @@ platformdirs==3.11.0 pluggy==1.5.0 # via pytest pydantic==2.10.3 - # via genai-python + # via do-genai-python pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -86,14 +86,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via genai-python + # via do-genai-python time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via genai-python + # via do-genai-python # via mypy # via pydantic # via pydantic-core diff --git a/requirements.lock b/requirements.lock index a61bce5e..174d5e2e 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,13 +13,13 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via genai-python + # via do-genai-python # via httpx certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via genai-python + # via do-genai-python exceptiongroup==1.2.2 # via anyio h11==0.14.0 @@ -27,19 +27,19 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via genai-python + # via do-genai-python idna==3.4 # via anyio # via httpx pydantic==2.10.3 - # via genai-python + # via do-genai-python pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via genai-python + # via do-genai-python typing-extensions==4.12.2 # via anyio - # via genai-python + # via do-genai-python # via pydantic # via pydantic-core From 0426ecd936b08e383188f2d90e6231fd7bb62b61 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 14:57:39 +0000 Subject: [PATCH 018/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/digitalocean_genai_sdk/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ba6c3483..f14b480a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.1" + ".": "0.1.0-alpha.2" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 43bdf659..c670b05d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "do-genai-python" -version = "0.1.0-alpha.1" +version = "0.1.0-alpha.2" description = "The official Python library for the 
digitalocean-genai-sdk API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index a788b3f9..7eac57f8 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "digitalocean_genai_sdk" -__version__ = "0.1.0-alpha.1" # x-release-please-version +__version__ = "0.1.0-alpha.2" # x-release-please-version From 404287859e859ad050979ceeac2b0297b9f22e85 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 15:09:03 +0000 Subject: [PATCH 019/200] chore: update SDK settings --- .stats.yml | 2 +- README.md | 4 ++-- pyproject.toml | 2 +- requirements-dev.lock | 12 ++++++------ requirements.lock | 12 ++++++------ 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0c46095d..eb8f1c2d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: a5e87f457b74bf2341470b4cc1c719d6 +config_hash: 565bf6264bdf2a317cc5e2f02d02a702 diff --git a/README.md b/README.md index e47ef7a0..7b7f4731 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Digitalocean Genai SDK Python API library -[![PyPI version](https://img.shields.io/pypi/v/do-genai-python.svg)](https://pypi.org/project/do-genai-python/) +[![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ application. 
The library includes type definitions for all request params and response fields, @@ -16,7 +16,7 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ```sh # install from PyPI -pip install --pre do-genai-python +pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python ``` ## Usage diff --git a/pyproject.toml b/pyproject.toml index c670b05d..57c92966 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "do-genai-python" +name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" version = "0.1.0-alpha.2" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index 5b5ac1c0..f784e9a3 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,7 +13,7 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx argcomplete==3.1.2 # via nox @@ -26,7 +26,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python exceptiongroup==1.2.2 # via anyio # via pytest @@ -37,7 +37,7 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via respx idna==3.4 # via anyio @@ -64,7 +64,7 @@ platformdirs==3.11.0 pluggy==1.5.0 # via pytest pydantic==2.10.3 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -86,14 +86,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via mypy # via pydantic # via pydantic-core diff --git a/requirements.lock b/requirements.lock index 174d5e2e..dab2f6ce 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,13 +13,13 @@ annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python exceptiongroup==1.2.2 # via anyio h11==0.14.0 @@ -27,19 +27,19 @@ h11==0.14.0 httpcore==1.0.2 # via httpx httpx==0.28.1 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio # via httpx pydantic==2.10.3 - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python typing-extensions==4.12.2 # via anyio - # via do-genai-python + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via pydantic # via pydantic-core From 0a47a63640377bf9b43d8770656bef2ee235a778 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 16:01:02 +0000 Subject: [PATCH 020/200] chore: update SDK settings --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/digitalocean_genai_sdk/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f14b480a..aaf968a1 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.2" + ".": "0.1.0-alpha.3" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 57c92966..9dd31517 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.2" +version = "0.1.0-alpha.3" description = "The official Python library for the digitalocean-genai-sdk API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/digitalocean_genai_sdk/_version.py b/src/digitalocean_genai_sdk/_version.py index 7eac57f8..50483bc2 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/digitalocean_genai_sdk/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "digitalocean_genai_sdk" -__version__ = "0.1.0-alpha.2" # x-release-please-version +__version__ = "0.1.0-alpha.3" # x-release-please-version From 5ed160dbb55445636be37a40e8afa3d5003a9057 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:15:06 +0000 Subject: [PATCH 021/200] chore(tests): run tests in parallel --- pyproject.toml | 3 ++- requirements-dev.lock | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9dd31517..03329f8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ dev-dependencies = [ "importlib-metadata>=6.7.0", "rich>=13.7.1", "nest_asyncio==1.6.0", + "pytest-xdist>=3.6.1", ] [tool.rye.scripts] @@ -125,7 +126,7 @@ replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2> [tool.pytest.ini_options] testpaths = ["tests"] -addopts = "--tb=short" +addopts = "--tb=short -n auto" xfail_strict = true asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" diff --git a/requirements-dev.lock b/requirements-dev.lock index f784e9a3..1e074a56 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -30,6 +30,8 @@ distro==1.8.0 exceptiongroup==1.2.2 # via anyio # via pytest +execnet==2.1.1 + # via pytest-xdist filelock==3.12.4 # via virtualenv h11==0.14.0 @@ -72,7 +74,9 @@ pygments==2.18.0 pyright==1.1.399 pytest==8.3.3 # via pytest-asyncio + # via pytest-xdist pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 From caf9364e830f37385a0a74bb7bba0ab33c26f440 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:39:39 +0000 Subject: [PATCH 022/200] fix(client): correctly parse binary response | stream --- src/digitalocean_genai_sdk/_base_client.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/digitalocean_genai_sdk/_base_client.py index 73cd30fc..6fd247cc 100644 --- a/src/digitalocean_genai_sdk/_base_client.py +++ b/src/digitalocean_genai_sdk/_base_client.py @@ -1071,7 +1071,14 @@ def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to 
parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1574,7 +1581,14 @@ async def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") From 875e9c170f391d88585baaf6c0e3df4263eb17b4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 16:42:53 +0000 Subject: [PATCH 023/200] feat(api): update via SDK Studio --- .github/workflows/create-releases.yml | 38 + .github/workflows/publish-pypi.yml | 10 +- .github/workflows/release-doctor.yml | 5 +- .stats.yml | 4 +- CONTRIBUTING.md | 6 +- LICENSE | 2 +- README.md | 92 +- SECURITY.md | 2 +- api.md | 263 +---- bin/check-release-environment | 6 +- mypy.ini | 2 +- pyproject.toml | 18 +- release-please-config.json | 2 +- scripts/lint | 2 +- .../_utils/_resources_proxy.py | 24 - .../resources/__init__.py | 145 --- .../resources/agents/__init__.py | 89 -- .../resources/agents/agents.py | 965 ------------------ .../resources/agents/api_keys.py | 581 ----------- .../resources/agents/child_agents.py | 508 --------- .../resources/agents/functions.py | 421 -------- .../resources/agents/knowledge_bases.py | 346 ------- .../resources/api_keys/__init__.py | 19 - .../resources/api_keys/api_keys.py | 275 ----- .../resources/api_keys/api_keys_.py | 529 ---------- .../resources/auth/agents/__init__.py | 33 - .../resources/auth/agents/token.py | 173 ---- .../resources/auth/auth.py | 102 -- .../resources/indexing_jobs.py | 543 ---------- .../resources/knowledge_bases/__init__.py | 33 - .../resources/knowledge_bases/data_sources.py | 410 -------- .../knowledge_bases/knowledge_bases.py | 667 ------------ .../resources/providers/__init__.py | 47 - .../resources/providers/anthropic/__init__.py | 33 - .../providers/anthropic/anthropic.py | 102 -- .../resources/providers/anthropic/keys.py | 662 ------------ .../resources/providers/openai/__init__.py | 33 - .../resources/providers/openai/keys.py | 658 ------------ .../resources/providers/openai/openai.py | 102 -- .../resources/providers/providers.py | 134 --- .../resources/regions.py | 191 ---- src/digitalocean_genai_sdk/types/__init__.py | 57 -- .../types/agent_create_params.py | 39 - .../types/agent_create_response.py | 16 - .../types/agent_delete_response.py | 16 - .../types/agent_list_params.py | 18 - .../types/agent_list_response.py | 198 ---- .../types/agent_retrieve_response.py | 16 - .../types/agent_update_params.py | 65 -- .../types/agent_update_response.py | 16 - .../types/agent_update_status_params.py | 16 - .../types/agent_update_status_response.py | 16 - .../types/agents/__init__.py | 31 - .../types/agents/api_key_create_params.py | 15 - .../types/agents/api_key_create_response.py | 12 - .../types/agents/api_key_delete_response.py | 12 - 
.../types/agents/api_key_list_params.py | 15 - .../types/agents/api_key_list_response.py | 18 - .../agents/api_key_regenerate_response.py | 12 - .../types/agents/api_key_update_params.py | 19 - .../types/agents/api_key_update_response.py | 12 - .../agents/api_link_knowledge_base_output.py | 16 - .../types/agents/child_agent_add_params.py | 22 - .../types/agents/child_agent_add_response.py | 14 - .../agents/child_agent_delete_response.py | 13 - .../types/agents/child_agent_update_params.py | 24 - .../agents/child_agent_update_response.py | 18 - .../types/agents/child_agent_view_response.py | 16 - .../types/agents/function_create_params.py | 25 - .../types/agents/function_create_response.py | 16 - .../types/agents/function_delete_response.py | 16 - .../types/agents/function_update_params.py | 29 - .../types/agents/function_update_response.py | 16 - .../agents/knowledge_base_detach_response.py | 16 - src/digitalocean_genai_sdk/types/api_agent.py | 263 ----- .../types/api_agent_api_key_info.py | 22 - .../types/api_agreement.py | 17 - .../types/api_anthropic_api_key_info.py | 22 - .../types/api_indexing_job.py | 43 - .../types/api_key_list_params.py | 42 - .../types/api_key_list_response.py | 42 - .../types/api_keys/__init__.py | 13 - .../types/api_keys/api_key_create_params.py | 11 - .../types/api_keys/api_key_create_response.py | 12 - .../types/api_keys/api_key_delete_response.py | 12 - .../types/api_keys/api_key_list_params.py | 15 - .../types/api_keys/api_key_list_response.py | 18 - .../types/api_keys/api_key_update_params.py | 15 - .../api_key_update_regenerate_response.py | 12 - .../types/api_keys/api_key_update_response.py | 12 - .../types/api_keys/api_model_api_key_info.py | 22 - .../types/api_knowledge_base.py | 37 - src/digitalocean_genai_sdk/types/api_model.py | 57 -- .../types/api_model_version.py | 15 - .../types/api_openai_api_key_info.py | 25 - .../types/auth/agents/__init__.py | 6 - .../types/auth/agents/token_create_params.py | 13 - .../auth/agents/token_create_response.py | 13 - .../types/indexing_job_create_params.py | 14 - .../types/indexing_job_create_response.py | 12 - .../types/indexing_job_list_params.py | 15 - .../types/indexing_job_list_response.py | 18 - ...xing_job_retrieve_data_sources_response.py | 52 - .../types/indexing_job_retrieve_response.py | 12 - .../indexing_job_update_cancel_params.py | 14 - .../indexing_job_update_cancel_response.py | 12 - .../types/knowledge_base_create_params.py | 64 -- .../types/knowledge_base_create_response.py | 12 - .../types/knowledge_base_delete_response.py | 11 - .../types/knowledge_base_list_params.py | 15 - .../types/knowledge_base_list_response.py | 18 - .../types/knowledge_base_retrieve_response.py | 30 - .../types/knowledge_base_update_params.py | 27 - .../types/knowledge_base_update_response.py | 12 - .../types/knowledge_bases/__init__.py | 16 - .../api_file_upload_data_source.py | 15 - .../api_file_upload_data_source_param.py | 15 - .../api_knowledge_base_data_source.py | 35 - .../knowledge_bases/api_spaces_data_source.py | 15 - .../api_spaces_data_source_param.py | 15 - .../api_web_crawler_data_source.py | 26 - .../api_web_crawler_data_source_param.py | 25 - .../data_source_create_params.py | 33 - .../data_source_create_response.py | 12 - .../data_source_delete_response.py | 13 - .../data_source_list_params.py | 15 - .../data_source_list_response.py | 18 - .../types/providers/anthropic/__init__.py | 14 - .../providers/anthropic/key_create_params.py | 13 - .../anthropic/key_create_response.py | 12 - 
.../anthropic/key_delete_response.py | 12 - .../anthropic/key_list_agents_params.py | 15 - .../anthropic/key_list_agents_response.py | 22 - .../providers/anthropic/key_list_params.py | 15 - .../providers/anthropic/key_list_response.py | 18 - .../anthropic/key_retrieve_response.py | 12 - .../providers/anthropic/key_update_params.py | 17 - .../anthropic/key_update_response.py | 12 - .../types/providers/openai/__init__.py | 14 - .../providers/openai/key_create_params.py | 13 - .../providers/openai/key_create_response.py | 12 - .../providers/openai/key_delete_response.py | 12 - .../types/providers/openai/key_list_params.py | 15 - .../providers/openai/key_list_response.py | 18 - .../openai/key_retrieve_agents_params.py | 15 - .../openai/key_retrieve_agents_response.py | 22 - .../providers/openai/key_retrieve_response.py | 12 - .../providers/openai/key_update_params.py | 17 - .../providers/openai/key_update_response.py | 12 - .../types/region_list_params.py | 15 - .../types/region_list_response.py | 23 - .../__init__.py | 16 +- .../_base_client.py | 2 +- .../_client.py | 301 +----- .../_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 4 +- .../_files.py | 0 .../_models.py | 0 .../_qs.py | 0 .../_resource.py | 10 +- .../_response.py | 12 +- .../_streaming.py | 6 +- .../_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 6 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 src/gradientai/_utils/_resources_proxy.py | 24 + .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 .../_version.py | 2 +- src/gradientai/lib/.keep | 4 + .../py.typed | 0 src/gradientai/resources/__init__.py | 61 ++ .../resources/agents}/__init__.py | 28 +- .../resources}/agents/agents.py | 50 +- .../resources/agents/versions.py | 8 +- .../resources/chat.py | 8 +- .../resources/embeddings.py | 8 +- .../resources/models.py | 8 +- src/gradientai/types/__init__.py | 16 + src/gradientai/types/agents/__init__.py | 10 + .../types/agents/api_links.py | 0 .../types/agents/api_meta.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_keys}/__init__.py | 0 .../types/api_retrieval_method.py | 0 .../types/auth}/__init__.py | 0 .../gradientai/types/auth/agents}/__init__.py | 2 + ...request_message_content_part_text_param.py | 0 .../types/chat_completion_token_logprob.py | 0 .../types/chat_create_completion_params.py | 0 .../types/chat_create_completion_response.py | 0 .../types/embedding_create_params.py | 0 .../types/embedding_create_response.py | 0 .../types/knowledge_bases}/__init__.py | 2 + .../types/model.py | 0 .../types/model_list_response.py | 0 .../gradientai/types/providers}/__init__.py | 2 + .../types/providers/anthropic}/__init__.py | 2 + .../types/providers/openai/__init__.py | 3 + tests/api_resources/agents/test_api_keys.py | 572 ----------- .../api_resources/agents/test_child_agents.py | 485 --------- tests/api_resources/agents/test_functions.py | 382 ------- .../agents/test_knowledge_bases.py | 314 ------ tests/api_resources/agents/test_versions.py | 44 +- .../api_resources/api_keys/test_api_keys_.py | 446 -------- tests/api_resources/auth/agents/test_token.py | 124 --- .../knowledge_bases/test_data_sources.py | 374 ------- tests/api_resources/providers/__init__.py | 1 - .../providers/anthropic/__init__.py | 1 - 
.../providers/anthropic/test_keys.py | 555 ---------- .../providers/openai/__init__.py | 1 - .../providers/openai/test_keys.py | 555 ---------- tests/api_resources/test_agents.py | 597 ----------- tests/api_resources/test_api_keys.py | 100 -- tests/api_resources/test_chat.py | 20 +- tests/api_resources/test_embeddings.py | 20 +- tests/api_resources/test_indexing_jobs.py | 446 -------- tests/api_resources/test_knowledge_bases.py | 510 --------- tests/api_resources/test_models.py | 32 +- tests/api_resources/test_regions.py | 96 -- tests/conftest.py | 14 +- tests/test_client.py | 205 ++-- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 40 +- tests/test_streaming.py | 44 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 244 files changed, 596 insertions(+), 16866 deletions(-) create mode 100644 .github/workflows/create-releases.yml delete mode 100644 src/digitalocean_genai_sdk/_utils/_resources_proxy.py delete mode 100644 src/digitalocean_genai_sdk/resources/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/agents.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/child_agents.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/functions.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/agents/token.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/auth.py delete mode 100644 src/digitalocean_genai_sdk/resources/indexing_jobs.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/openai.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/providers.py delete mode 100644 src/digitalocean_genai_sdk/resources/regions.py delete mode 100644 src/digitalocean_genai_sdk/types/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_delete_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/agent_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_status_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_status_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agent.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agent_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agreement.py delete mode 100644 src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_indexing_job.py delete mode 100644 src/digitalocean_genai_sdk/types/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py delete mode 100644 
src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_knowledge_base.py delete mode 100644 src/digitalocean_genai_sdk/types/api_model.py delete mode 100644 src/digitalocean_genai_sdk/types/api_model_version.py delete mode 100644 src/digitalocean_genai_sdk/types/api_openai_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py delete mode 100644 
src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/region_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/region_list_response.py rename src/{digitalocean_genai_sdk => gradientai}/__init__.py (87%) rename src/{digitalocean_genai_sdk => gradientai}/_base_client.py (99%) rename src/{digitalocean_genai_sdk => gradientai}/_client.py (63%) rename src/{digitalocean_genai_sdk => gradientai}/_compat.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_constants.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_exceptions.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/_files.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_models.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_qs.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_resource.py (76%) rename src/{digitalocean_genai_sdk => gradientai}/_response.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/_streaming.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/_types.py (99%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/__init__.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_logs.py (67%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_proxy.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_reflection.py (100%) create mode 100644 src/gradientai/_utils/_resources_proxy.py rename src/{digitalocean_genai_sdk => gradientai}/_utils/_streams.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_sync.py (100%) rename src/{digitalocean_genai_sdk => 
gradientai}/_utils/_transform.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_typing.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_utils.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_version.py (79%) create mode 100644 src/gradientai/lib/.keep rename src/{digitalocean_genai_sdk => gradientai}/py.typed (100%) create mode 100644 src/gradientai/resources/__init__.py rename src/{digitalocean_genai_sdk/resources/auth => gradientai/resources/agents}/__init__.py (53%) rename src/{digitalocean_genai_sdk/resources/auth => gradientai/resources}/agents/agents.py (64%) rename src/{digitalocean_genai_sdk => gradientai}/resources/agents/versions.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/resources/chat.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/resources/embeddings.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/resources/models.py (97%) create mode 100644 src/gradientai/types/__init__.py create mode 100644 src/gradientai/types/agents/__init__.py rename src/{digitalocean_genai_sdk => gradientai}/types/agents/api_links.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/api_meta.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_list_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_list_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_update_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_update_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/api_deployment_visibility.py (100%) rename src/{digitalocean_genai_sdk/types/auth => gradientai/types/api_keys}/__init__.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/api_retrieval_method.py (100%) rename src/{digitalocean_genai_sdk/types/providers => gradientai/types/auth}/__init__.py (100%) rename {tests/api_resources/api_keys => src/gradientai/types/auth/agents}/__init__.py (70%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_completion_request_message_content_part_text_param.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_completion_token_logprob.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_create_completion_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_create_completion_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/embedding_create_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/embedding_create_response.py (100%) rename {tests/api_resources/auth => src/gradientai/types/knowledge_bases}/__init__.py (70%) rename src/{digitalocean_genai_sdk => gradientai}/types/model.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/model_list_response.py (100%) rename {tests/api_resources/auth/agents => src/gradientai/types/providers}/__init__.py (70%) rename {tests/api_resources/knowledge_bases => src/gradientai/types/providers/anthropic}/__init__.py (70%) create mode 100644 src/gradientai/types/providers/openai/__init__.py delete mode 100644 tests/api_resources/agents/test_api_keys.py delete mode 100644 tests/api_resources/agents/test_child_agents.py delete mode 100644 tests/api_resources/agents/test_functions.py delete mode 100644 tests/api_resources/agents/test_knowledge_bases.py delete mode 100644 tests/api_resources/api_keys/test_api_keys_.py delete mode 100644 
tests/api_resources/auth/agents/test_token.py delete mode 100644 tests/api_resources/knowledge_bases/test_data_sources.py delete mode 100644 tests/api_resources/providers/__init__.py delete mode 100644 tests/api_resources/providers/anthropic/__init__.py delete mode 100644 tests/api_resources/providers/anthropic/test_keys.py delete mode 100644 tests/api_resources/providers/openai/__init__.py delete mode 100644 tests/api_resources/providers/openai/test_keys.py delete mode 100644 tests/api_resources/test_agents.py delete mode 100644 tests/api_resources/test_api_keys.py delete mode 100644 tests/api_resources/test_indexing_jobs.py delete mode 100644 tests/api_resources/test_knowledge_bases.py delete mode 100644 tests/api_resources/test_regions.py diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 00000000..04dac49f --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,38 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - main + +jobs: + release: + name: release + if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + + - name: Install Rye + if: ${{ steps.release.outputs.releases_created }} + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 2bc5b4b2..bff3a970 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
-# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +# workflow for re-running publishing to PyPI in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish @@ -28,4 +24,4 @@ jobs: run: | bash ./bin/publish-pypi env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 0f23cbc4..94e02117 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -9,7 +9,7 @@ jobs: release_doctor: name: release doctor runs-on: ubuntu-latest - if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v4 @@ -18,4 +18,5 @@ jobs: run: | bash ./bin/check-release-environment env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.stats.yml b/.stats.yml index eb8f1c2d..652e9eac 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 60 +configured_endpoints: 6 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 565bf6264bdf2a317cc5e2f02d02a702 +config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d5d60a7..086907ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never -modify the contents of the `src/digitalocean_genai_sdk/lib/` and `examples/` directories. +modify the contents of the `src/gradientai/lib/` and `examples/` directories. ## Adding and running examples @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/genai-python.git +$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. 
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/LICENSE b/LICENSE index 0c1fe1d5..974cb08a 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 Digitalocean Genai SDK + Copyright 2025 Gradient AI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 7b7f4731..a6757d3a 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -# Digitalocean Genai SDK Python API library +# Gradient AI Python API library [![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) -The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ +The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). @@ -25,9 +25,9 @@ The full API of this library can be found in [api.md](api.md). ```python import os -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -client = DigitaloceanGenaiSDK( +client = GradientAI( api_key=os.environ.get( "DIGITALOCEAN_GENAI_SDK_API_KEY" ), # This is the default and can be omitted @@ -46,14 +46,14 @@ so that your API Key is not stored in source control. ## Async usage -Simply import `AsyncDigitaloceanGenaiSDK` instead of `DigitaloceanGenaiSDK` and use `await` with each API call: +Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with each API call: ```python import os import asyncio -from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK +from gradientai import AsyncGradientAI -client = AsyncDigitaloceanGenaiSDK( +client = AsyncGradientAI( api_key=os.environ.get( "DIGITALOCEAN_GENAI_SDK_API_KEY" ), # This is the default and can be omitted @@ -86,42 +86,48 @@ Typed requests and responses provide autocomplete and documentation within your Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK - -client = DigitaloceanGenaiSDK() - -data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={}, +from gradientai import GradientAI + +client = GradientAI() + +response = client.chat.create_completion( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream_options={}, ) -print(data_source.aws_data_source) +print(response.stream_options) ``` ## Handling errors -When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised. +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised. 
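Ahead of the full example below, the relationship between these error classes in one sketch (the subclass layout is assumed from the names this section uses, not read out of the diff):

```python
import gradientai

# Hedged sketch: APIError as the root, with connection failures and
# HTTP status failures branching off it, and RateLimitError as one of
# the status-code-specific subclasses.
assert issubclass(gradientai.APIConnectionError, gradientai.APIError)
assert issubclass(gradientai.APIStatusError, gradientai.APIError)
assert issubclass(gradientai.RateLimitError, gradientai.APIStatusError)
```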
When the API returns a non-success status code (that is, 4xx or 5xx -response), a subclass of `digitalocean_genai_sdk.APIStatusError` is raised, containing `status_code` and `response` properties. +response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. -All errors inherit from `digitalocean_genai_sdk.APIError`. +All errors inherit from `gradientai.APIError`. ```python -import digitalocean_genai_sdk -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +import gradientai +from gradientai import GradientAI -client = DigitaloceanGenaiSDK() +client = GradientAI() try: client.agents.versions.list( uuid="REPLACE_ME", ) -except digitalocean_genai_sdk.APIConnectionError as e: +except gradientai.APIConnectionError as e: print("The server could not be reached") print(e.__cause__) # an underlying Exception, likely raised within httpx. -except digitalocean_genai_sdk.RateLimitError as e: +except gradientai.RateLimitError as e: print("A 429 status code was received; we should back off a bit.") -except digitalocean_genai_sdk.APIStatusError as e: +except gradientai.APIStatusError as e: print("Another non-200-range status code was received") print(e.status_code) print(e.response) @@ -149,10 +155,10 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ You can use the `max_retries` option to configure or disable retry settings: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI # Configure the default for all requests: -client = DigitaloceanGenaiSDK( +client = GradientAI( # default is 2 max_retries=0, ) @@ -169,16 +175,16 @@ By default requests time out after 1 minute. You can configure this with a `time which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI # Configure the default for all requests: -client = DigitaloceanGenaiSDK( +client = GradientAI( # 20 seconds (default is 1 minute) timeout=20.0, ) # More granular control: -client = DigitaloceanGenaiSDK( +client = GradientAI( timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0), ) @@ -198,10 +204,10 @@ Note that requests that time out are [retried twice by default](#retries). We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module. -You can enable logging by setting the environment variable `DIGITALOCEAN_GENAI_SDK_LOG` to `info`. +You can enable logging by setting the environment variable `GRADIENT_AI_LOG` to `info`. ```shell -$ export DIGITALOCEAN_GENAI_SDK_LOG=info +$ export GRADIENT_AI_LOG=info ``` Or to `debug` for more verbose logging. @@ -223,9 +229,9 @@ if response.my_field is None: The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g., ```py -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -client = DigitaloceanGenaiSDK() +client = GradientAI() response = client.agents.versions.with_raw_response.list( uuid="REPLACE_ME", ) @@ -235,9 +241,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. 
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -301,10 +307,10 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c ```python import httpx -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, DefaultHttpxClient +from gradientai import GradientAI, DefaultHttpxClient -client = DigitaloceanGenaiSDK( - # Or use the `DIGITALOCEAN_GENAI_SDK_BASE_URL` env var +client = GradientAI( + # Or use the `GRADIENT_AI_BASE_URL` env var base_url="http://my.test.server.example.com:8083", http_client=DefaultHttpxClient( proxy="http://my.test.proxy.example.com", @@ -324,9 +330,9 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. ```py -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -with DigitaloceanGenaiSDK() as client: +with GradientAI() as client: # make requests here ... @@ -343,7 +349,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version @@ -352,8 +358,8 @@ If you've upgraded to the latest version but aren't seeing any new features you You can determine the version that is being used at runtime with: ```py -import digitalocean_genai_sdk -print(digitalocean_genai_sdk.__version__) +import gradientai +print(gradientai.__version__) ``` ## Requirements diff --git a/SECURITY.md b/SECURITY.md index d08f7996..a7593759 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,7 +16,7 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Digitalocean Genai SDK, please follow the respective company's security reporting guidelines. +or products provided by Gradient AI, please follow the respective company's security reporting guidelines. 
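The hunks above rewrite the `.with_raw_response` example in full, but the body under the `#### .with_streaming_response` heading passes through as unchanged context. For orientation, a hedged sketch of the companion pattern, with the method chain and iteration helpers assumed from the raw-response example rather than taken from this diff:

```python
from gradientai import GradientAI

client = GradientAI()

# Assumed shape: `.with_streaming_response` mirrors `.with_raw_response`,
# but defers reading the body and closes the connection when the block exits.
with client.agents.versions.with_streaming_response.list(
    uuid="REPLACE_ME",
) as response:
    print(response.headers.get("content-type"))
    for line in response.iter_lines():
        print(line)
```

The context manager is the important part of the sketch: an unread streamed body would otherwise hold its connection open.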
--- diff --git a/api.md b/api.md index 32dbe7df..d05dac3c 100644 --- a/api.md +++ b/api.md @@ -3,7 +3,7 @@ Types: ```python -from digitalocean_genai_sdk.types import ( +from gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, APIAnthropicAPIKeyInfo, @@ -11,310 +11,81 @@ from digitalocean_genai_sdk.types import ( APIModel, APIOpenAIAPIKeyInfo, APIRetrievalMethod, - AgentCreateResponse, - AgentRetrieveResponse, - AgentUpdateResponse, - AgentListResponse, - AgentDeleteResponse, - AgentUpdateStatusResponse, ) ``` -Methods: - -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse - -## APIKeys - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - APIKeyCreateResponse, - APIKeyUpdateResponse, - APIKeyListResponse, - APIKeyDeleteResponse, - APIKeyRegenerateResponse, -) -``` - -Methods: - -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse - -## Functions - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - FunctionCreateResponse, - FunctionUpdateResponse, - FunctionDeleteResponse, -) -``` - -Methods: - -- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse - ## Versions Types: ```python -from digitalocean_genai_sdk.types.agents import ( - APILinks, - APIMeta, - VersionUpdateResponse, - VersionListResponse, -) +from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types.agents import ( - APILinkKnowledgeBaseOutput, - KnowledgeBaseDetachResponse, -) -``` - -Methods: - -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse - -## ChildAgents - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - ChildAgentUpdateResponse, - ChildAgentDeleteResponse, - ChildAgentAddResponse, - ChildAgentViewResponse, -) -``` - -Methods: - -- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> 
ChildAgentDeleteResponse -- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse - -# Providers - -## Anthropic - -### Keys - -Types: - -```python -from digitalocean_genai_sdk.types.providers.anthropic import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyListAgentsResponse, -) -``` - -Methods: - -- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse - -## OpenAI - -### Keys - -Types: - -```python -from digitalocean_genai_sdk.types.providers.openai import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyRetrieveAgentsResponse, -) -``` - -Methods: - -- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse - -# Auth - -## Agents - -### Token - -Types: - -```python -from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse -``` - -Methods: - -- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse - -# Regions - -Types: - -```python -from digitalocean_genai_sdk.types import RegionListResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput ``` -Methods: - -- client.regions.list(\*\*params) -> RegionListResponse - # IndexingJobs Types: ```python -from digitalocean_genai_sdk.types import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) +from gradientai.types import APIIndexingJob ``` -Methods: - -- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types import ( - APIKnowledgeBase, - KnowledgeBaseCreateResponse, - KnowledgeBaseRetrieveResponse, - KnowledgeBaseUpdateResponse, - KnowledgeBaseListResponse, - KnowledgeBaseDeleteResponse, -) +from gradientai.types import APIKnowledgeBase ``` -Methods: - -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> 
KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse - ## DataSources Types: ```python -from digitalocean_genai_sdk.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, - DataSourceCreateResponse, - DataSourceListResponse, - DataSourceDeleteResponse, ) ``` -Methods: - -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse - # APIKeys Types: ```python -from digitalocean_genai_sdk.types import APIAgreement, APIModelVersion, APIKeyListResponse +from gradientai.types import APIAgreement, APIModelVersion ``` -Methods: - -- client.api_keys.list(\*\*params) -> APIKeyListResponse - ## APIKeys Types: ```python -from digitalocean_genai_sdk.types.api_keys import ( - APIModelAPIKeyInfo, - APIKeyCreateResponse, - APIKeyUpdateResponse, - APIKeyListResponse, - APIKeyDeleteResponse, - APIKeyUpdateRegenerateResponse, -) +from gradientai.types.api_keys import APIModelAPIKeyInfo ``` -Methods: - -- client.api_keys.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api_keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api_keys.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api_keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api_keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse - # Chat Types: ```python -from digitalocean_genai_sdk.types import ( +from gradientai.types import ( ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob, ChatCreateCompletionResponse, @@ -323,29 +94,29 @@ from digitalocean_genai_sdk.types import ( Methods: -- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse +- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse # Embeddings Types: ```python -from digitalocean_genai_sdk.types import EmbeddingCreateResponse +from gradientai.types import EmbeddingCreateResponse ``` Methods: -- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse +- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse # Models Types: ```python -from digitalocean_genai_sdk.types import Model, ModelListResponse +from gradientai.types import Model, ModelListResponse ``` Methods: -- client.models.retrieve(model) -> Model -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> Model +- client.models.list() -> ModelListResponse diff --git a/bin/check-release-environment b/bin/check-release-environment index 9e89a88a..78967e8b 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,8 +2,12 @@ errors=() +if [ -z "${STAINLESS_API_KEY}" ]; then + errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") +fi + if [ -z "${PYPI_TOKEN}" ]; then - errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") + errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set.
Please set it in either this repository's secrets or your organization secrets.") fi lenErrors=${#errors[@]} diff --git a/mypy.ini b/mypy.ini index 54f4282a..748d8234 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/digitalocean_genai_sdk/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index 03329f8b..9c6fdd19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" version = "0.1.0-alpha.3" -description = "The official Python library for the digitalocean-genai-sdk API" +description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" authors = [ -{ name = "Digitalocean Genai SDK", email = "" }, +{ name = "Gradient AI", email = "" }, ] dependencies = [ "httpx>=0.23.0, <1", @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/genai-python" -Repository = "https://github.com/digitalocean/genai-python" +Homepage = "https://github.com/digitalocean/gradientai-python" +Repository = "https://github.com/digitalocean/gradientai-python" [tool.rye] @@ -76,14 +76,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import digitalocean_genai_sdk'" +"check:importable" = "python -c 'import gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes digitalocean_genai_sdk --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
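For context, the renamed `check:importable` script above is just an import smoke test for the new top-level package. A minimal sketch of the same check, assuming only the module paths that actually appear in this patch (`gradientai`, `gradientai.types`, `gradientai.types.agents`):

```python
# Equivalent of the `check:importable` script, i.e. `python -c 'import gradientai'`.
# Only module paths visible elsewhere in this patch are assumed to exist.
import gradientai
import gradientai.types
import gradientai.types.agents

print("import ok:", gradientai.__name__)
```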
[build-system] @@ -96,7 +96,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/digitalocean_genai_sdk"] +packages = ["src/gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -122,7 +122,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] @@ -199,7 +199,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["digitalocean_genai_sdk", "tests"] +known-first-party = ["gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index 234b9475..2ff9a58c 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/digitalocean_genai_sdk/_version.py" + "src/gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index 3f725f2d..37b38f6f 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import digitalocean_genai_sdk' +rye run python -c 'import gradientai' diff --git a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py b/src/digitalocean_genai_sdk/_utils/_resources_proxy.py deleted file mode 100644 index 4ebaf7a4..00000000 --- a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from typing import Any -from typing_extensions import override - -from ._proxy import LazyProxy - - -class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `digitalocean_genai_sdk.resources` module. - - This is used so that we can lazily import `digitalocean_genai_sdk.resources` only when - needed *and* so that users can just import `digitalocean_genai_sdk` and reference `digitalocean_genai_sdk.resources` - """ - - @override - def __load__(self) -> Any: - import importlib - - mod = importlib.import_module("digitalocean_genai_sdk.resources") - return mod - - -resources = ResourcesProxy().__as_proxied__() diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py deleted file mode 100644 index 6dcbff02..00000000 --- a/src/digitalocean_genai_sdk/resources/__init__.py +++ /dev/null @@ -1,145 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) -from .chat import ( - ChatResource, - AsyncChatResource, - ChatResourceWithRawResponse, - AsyncChatResourceWithRawResponse, - ChatResourceWithStreamingResponse, - AsyncChatResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) -from .regions import ( - RegionsResource, - AsyncRegionsResource, - RegionsResourceWithRawResponse, - AsyncRegionsResourceWithRawResponse, - RegionsResourceWithStreamingResponse, - AsyncRegionsResourceWithStreamingResponse, -) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) -from .embeddings import ( - EmbeddingsResource, - AsyncEmbeddingsResource, - EmbeddingsResourceWithRawResponse, - AsyncEmbeddingsResourceWithRawResponse, - EmbeddingsResourceWithStreamingResponse, - AsyncEmbeddingsResourceWithStreamingResponse, -) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", - "RegionsResource", - "AsyncRegionsResource", - "RegionsResourceWithRawResponse", - "AsyncRegionsResourceWithRawResponse", - "RegionsResourceWithStreamingResponse", - "AsyncRegionsResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - 
"AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", - "EmbeddingsResource", - "AsyncEmbeddingsResource", - "EmbeddingsResourceWithRawResponse", - "AsyncEmbeddingsResourceWithRawResponse", - "EmbeddingsResourceWithStreamingResponse", - "AsyncEmbeddingsResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/agents/__init__.py b/src/digitalocean_genai_sdk/resources/agents/__init__.py deleted file mode 100644 index f41a0408..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .versions import ( - VersionsResource, - AsyncVersionsResource, - VersionsResourceWithRawResponse, - AsyncVersionsResourceWithRawResponse, - VersionsResourceWithStreamingResponse, - AsyncVersionsResourceWithStreamingResponse, -) -from .functions import ( - FunctionsResource, - AsyncFunctionsResource, - FunctionsResourceWithRawResponse, - AsyncFunctionsResourceWithRawResponse, - FunctionsResourceWithStreamingResponse, - AsyncFunctionsResourceWithStreamingResponse, -) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", - "FunctionsResource", - "AsyncFunctionsResource", - "FunctionsResourceWithRawResponse", - "AsyncFunctionsResourceWithRawResponse", - "FunctionsResourceWithStreamingResponse", - "AsyncFunctionsResourceWithStreamingResponse", - "VersionsResource", - "AsyncVersionsResource", - "VersionsResourceWithRawResponse", - "AsyncVersionsResourceWithRawResponse", - "VersionsResourceWithStreamingResponse", - "AsyncVersionsResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - 
"KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", - "ChildAgentsResource", - "AsyncChildAgentsResource", - "ChildAgentsResourceWithRawResponse", - "AsyncChildAgentsResourceWithRawResponse", - "ChildAgentsResourceWithStreamingResponse", - "AsyncChildAgentsResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/agents/agents.py b/src/digitalocean_genai_sdk/resources/agents/agents.py deleted file mode 100644 index 6d3ce525..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/agents.py +++ /dev/null @@ -1,965 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List - -import httpx - -from ...types import ( - APIRetrievalMethod, - APIDeploymentVisibility, - agent_list_params, - agent_create_params, - agent_update_params, - agent_update_status_params, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .versions import ( - VersionsResource, - AsyncVersionsResource, - VersionsResourceWithRawResponse, - AsyncVersionsResourceWithRawResponse, - VersionsResourceWithStreamingResponse, - AsyncVersionsResourceWithStreamingResponse, -) -from ..._compat import cached_property -from .functions import ( - FunctionsResource, - AsyncFunctionsResource, - FunctionsResourceWithRawResponse, - AsyncFunctionsResourceWithRawResponse, - FunctionsResourceWithStreamingResponse, - AsyncFunctionsResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) -from ...types.agent_list_response import AgentListResponse -from ...types.api_retrieval_method import APIRetrievalMethod -from ...types.agent_create_response import AgentCreateResponse -from ...types.agent_delete_response import AgentDeleteResponse -from ...types.agent_update_response import AgentUpdateResponse -from ...types.agent_retrieve_response import AgentRetrieveResponse -from ...types.api_deployment_visibility import APIDeploymentVisibility -from ...types.agent_update_status_response import AgentUpdateStatusResponse - -__all__ = ["AgentsResource", "AsyncAgentsResource"] - - -class 
AgentsResource(SyncAPIResource): - @cached_property - def api_keys(self) -> APIKeysResource: - return APIKeysResource(self._client) - - @cached_property - def functions(self) -> FunctionsResource: - return FunctionsResource(self._client) - - @cached_property - def versions(self) -> VersionsResource: - return VersionsResource(self._client) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - return KnowledgeBasesResource(self._client) - - @cached_property - def child_agents(self) -> ChildAgentsResource: - return ChildAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AgentsResourceWithStreamingResponse(self) - - def create( - self, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: - """To create a new agent, send a POST request to `/v2/gen-ai/agents`. - - The response - body contains a JSON object with the newly created agent object. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. - See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - model_uuid: Identifier for the foundation model. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/agents", - body=maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "knowledge_base_uuid": knowledge_base_uuid, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "region": region, - "tags": tags, - }, - agent_create_params.AgentCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentCreateResponse, - ) - - def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: - """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentRetrieveResponse, - ) - - def update( - self, - path_uuid: str, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: - """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. 
- See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - max_tokens: Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - - model_uuid: Identifier for the foundation model. - - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower - values produce more predictable and conservative responses, while higher values - encourage creativity and variation. - - top_p: Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_uuid}", - body=maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "k": k, - "max_tokens": max_tokens, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "provide_citations": provide_citations, - "retrieval_method": retrieval_method, - "tags": tags, - "temperature": temperature, - "top_p": top_p, - "body_uuid": body_uuid, - }, - agent_update_params.AgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateResponse, - ) - - def list( - self, - *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: - """ - To list all agents, send a GET request to `/v2/gen-ai/agents`. - - Args: - only_deployed: only list agents that are deployed. - - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "only_deployed": only_deployed, - "page": page, - "per_page": per_page, - }, - agent_list_params.AgentListParams, - ), - ), - cast_to=AgentListResponse, - ) - - def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: - """ - To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._delete( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentDeleteResponse, - ) - - def update_status( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: - """Check whether an agent is public or private. - - To update the agent status, send a - PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_uuid}/deployment_visibility", - body=maybe_transform( - { - "body_uuid": body_uuid, - "visibility": visibility, - }, - agent_update_status_params.AgentUpdateStatusParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateStatusResponse, - ) - - -class AsyncAgentsResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - return AsyncAPIKeysResource(self._client) - - @cached_property - def functions(self) -> AsyncFunctionsResource: - return AsyncFunctionsResource(self._client) - - @cached_property - def versions(self) -> AsyncVersionsResource: - return AsyncVersionsResource(self._client) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - return AsyncKnowledgeBasesResource(self._client) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResource: - return AsyncChildAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAgentsResourceWithStreamingResponse(self) - - async def create( - self, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: - """To create a new agent, send a POST request to `/v2/gen-ai/agents`. - - The response - body contains a JSON object with the newly created agent object. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. - See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - model_uuid: Identifier for the foundation model. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/agents", - body=await async_maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "knowledge_base_uuid": knowledge_base_uuid, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "region": region, - "tags": tags, - }, - agent_create_params.AgentCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentCreateResponse, - ) - - async def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: - """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentRetrieveResponse, - ) - - async def update( - self, - path_uuid: str, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: - """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. - See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - max_tokens: Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - - model_uuid: Identifier for the foundation model. - - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower - values produce more predictable and conservative responses, while higher values - encourage creativity and variation. - - top_p: Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_uuid}", - body=await async_maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "k": k, - "max_tokens": max_tokens, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "provide_citations": provide_citations, - "retrieval_method": retrieval_method, - "tags": tags, - "temperature": temperature, - "top_p": top_p, - "body_uuid": body_uuid, - }, - agent_update_params.AgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateResponse, - ) - - async def list( - self, - *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: - """ - To list all agents, send a GET request to `/v2/gen-ai/agents`. - - Args: - only_deployed: only list agents that are deployed. - - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "only_deployed": only_deployed, - "page": page, - "per_page": per_page, - }, - agent_list_params.AgentListParams, - ), - ), - cast_to=AgentListResponse, - ) - - async def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: - """ - To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._delete( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentDeleteResponse, - ) - - async def update_status( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: - """Check whether an agent is public or private. - - To update the agent status, send a - PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_uuid}/deployment_visibility", - body=await async_maybe_transform( - { - "body_uuid": body_uuid, - "visibility": visibility, - }, - agent_update_status_params.AgentUpdateStatusParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateStatusResponse, - ) - - -class AgentsResourceWithRawResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - self.create = to_raw_response_wrapper( - agents.create, - ) - self.retrieve = to_raw_response_wrapper( - agents.retrieve, - ) - self.update = to_raw_response_wrapper( - agents.update, - ) - self.list = to_raw_response_wrapper( - agents.list, - ) - self.delete = to_raw_response_wrapper( - agents.delete, - ) - self.update_status = to_raw_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> FunctionsResourceWithRawResponse: - return FunctionsResourceWithRawResponse(self._agents.functions) - - @cached_property - def versions(self) -> VersionsResourceWithRawResponse: - return VersionsResourceWithRawResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return ChildAgentsResourceWithRawResponse(self._agents.child_agents) - - -class AsyncAgentsResourceWithRawResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - self.create = 
async_to_raw_response_wrapper( - agents.create, - ) - self.retrieve = async_to_raw_response_wrapper( - agents.retrieve, - ) - self.update = async_to_raw_response_wrapper( - agents.update, - ) - self.list = async_to_raw_response_wrapper( - agents.list, - ) - self.delete = async_to_raw_response_wrapper( - agents.delete, - ) - self.update_status = async_to_raw_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> AsyncFunctionsResourceWithRawResponse: - return AsyncFunctionsResourceWithRawResponse(self._agents.functions) - - @cached_property - def versions(self) -> AsyncVersionsResourceWithRawResponse: - return AsyncVersionsResourceWithRawResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) - - -class AgentsResourceWithStreamingResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - self.create = to_streamed_response_wrapper( - agents.create, - ) - self.retrieve = to_streamed_response_wrapper( - agents.retrieve, - ) - self.update = to_streamed_response_wrapper( - agents.update, - ) - self.list = to_streamed_response_wrapper( - agents.list, - ) - self.delete = to_streamed_response_wrapper( - agents.delete, - ) - self.update_status = to_streamed_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> FunctionsResourceWithStreamingResponse: - return FunctionsResourceWithStreamingResponse(self._agents.functions) - - @cached_property - def versions(self) -> VersionsResourceWithStreamingResponse: - return VersionsResourceWithStreamingResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: - return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) - - -class AsyncAgentsResourceWithStreamingResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - self.create = async_to_streamed_response_wrapper( - agents.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - agents.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - agents.update, - ) - self.list = async_to_streamed_response_wrapper( - agents.list, - ) - self.delete = async_to_streamed_response_wrapper( - agents.delete, - ) - self.update_status = async_to_streamed_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: - return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) - - @cached_property - def versions(self) -> 
AsyncVersionsResourceWithStreamingResponse: - return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/digitalocean_genai_sdk/resources/agents/api_keys.py b/src/digitalocean_genai_sdk/resources/agents/api_keys.py deleted file mode 100644 index 451f5cb5..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/api_keys.py +++ /dev/null @@ -1,581 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.agents.api_key_list_response import APIKeyListResponse -from ...types.agents.api_key_create_response import APIKeyCreateResponse -from ...types.agents.api_key_delete_response import APIKeyDeleteResponse -from ...types.agents.api_key_update_response import APIKeyUpdateResponse -from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create an agent API key, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{path_agent_uuid}/api_keys", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "name": name, - }, - api_key_create_params.APIKeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - def list( - self, - agent_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all agent API keys, send a GET request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return self._get( - f"/v2/genai/agents/{agent_uuid}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - def regenerate( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyRegenerateResponse: - """ - To regenerate an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._put( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyRegenerateResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create an agent API key, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{path_agent_uuid}/api_keys", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "name": name, - }, - api_key_create_params.APIKeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - async def list( - self, - agent_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all agent API keys, send a GET request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return await self._get( - f"/v2/genai/agents/{agent_uuid}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - async def regenerate( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyRegenerateResponse: - """ - To regenerate an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._put( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyRegenerateResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_raw_response_wrapper( - api_keys.create, - ) - self.update = to_raw_response_wrapper( - api_keys.update, - ) - self.list = to_raw_response_wrapper( - api_keys.list, - ) - self.delete = to_raw_response_wrapper( - api_keys.delete, - ) - self.regenerate = to_raw_response_wrapper( - api_keys.regenerate, - ) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_raw_response_wrapper( - api_keys.create, - ) - self.update = async_to_raw_response_wrapper( - api_keys.update, - ) - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - api_keys.delete, - ) - self.regenerate = async_to_raw_response_wrapper( - api_keys.regenerate, - ) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_streamed_response_wrapper( - api_keys.create, - ) - self.update = to_streamed_response_wrapper( - api_keys.update, - ) - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = to_streamed_response_wrapper( - api_keys.delete, - ) - self.regenerate = 
to_streamed_response_wrapper( - api_keys.regenerate, - ) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_streamed_response_wrapper( - api_keys.create, - ) - self.update = async_to_streamed_response_wrapper( - api_keys.update, - ) - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - api_keys.delete, - ) - self.regenerate = async_to_streamed_response_wrapper( - api_keys.regenerate, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/child_agents.py b/src/digitalocean_genai_sdk/resources/agents/child_agents.py deleted file mode 100644 index 7d4ed3bb..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/child_agents.py +++ /dev/null @@ -1,508 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import child_agent_add_params, child_agent_update_params -from ...types.agents.child_agent_add_response import ChildAgentAddResponse -from ...types.agents.child_agent_view_response import ChildAgentViewResponse -from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse - -__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] - - -class ChildAgentsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ChildAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ChildAgentsResourceWithStreamingResponse(self) - - def update( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: - """ - To update an agent route for an agent, send a PUT request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return self._put( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - "uuid": uuid, - }, - child_agent_update_params.ChildAgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentUpdateResponse, - ) - - def delete( - self, - child_agent_uuid: str, - *, - parent_agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: - """ - To delete an agent route from a parent agent, send a DELETE request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not parent_agent_uuid: - raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") - if not child_agent_uuid: - raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") - return self._delete( - f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentDeleteResponse, - ) - - def add( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: - """ - To add an agent route to an agent, send a POST request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return self._post( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - }, - child_agent_add_params.ChildAgentAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentAddResponse, - ) - - def view( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: - """ - To view agent routes for an agent, send a GET request to - `/v2/gen-ai/agents/{uuid}/child_agents`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/agents/{uuid}/child_agents", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentViewResponse, - ) - - -class AsyncChildAgentsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncChildAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncChildAgentsResourceWithStreamingResponse(self) - - async def update( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: - """ - To update an agent route for an agent, send a PUT request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return await self._put( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=await async_maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - "uuid": uuid, - }, - child_agent_update_params.ChildAgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentUpdateResponse, - ) - - async def delete( - self, - child_agent_uuid: str, - *, - parent_agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: - """ - To delete an agent route from a parent agent, send a DELETE request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not parent_agent_uuid: - raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") - if not child_agent_uuid: - raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentDeleteResponse, - ) - - async def add( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: - """ - To add an agent route to an agent, send a POST request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return await self._post( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=await async_maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - }, - child_agent_add_params.ChildAgentAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentAddResponse, - ) - - async def view( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: - """ - To view agent routes for an agent, send a GET request to - `/v2/gen-ai/agents/{uuid}/child_agents`.
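-
-        Example (an illustrative sketch; assumes a configured async client that
-        exposes this resource as `client.agents.child_agents`, with a
-        placeholder UUID):
-
-            routes = await client.agents.child_agents.view("agent-uuid")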
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/agents/{uuid}/child_agents", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentViewResponse, - ) - - -class ChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = to_raw_response_wrapper( - child_agents.update, - ) - self.delete = to_raw_response_wrapper( - child_agents.delete, - ) - self.add = to_raw_response_wrapper( - child_agents.add, - ) - self.view = to_raw_response_wrapper( - child_agents.view, - ) - - -class AsyncChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = async_to_raw_response_wrapper( - child_agents.update, - ) - self.delete = async_to_raw_response_wrapper( - child_agents.delete, - ) - self.add = async_to_raw_response_wrapper( - child_agents.add, - ) - self.view = async_to_raw_response_wrapper( - child_agents.view, - ) - - -class ChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = to_streamed_response_wrapper( - child_agents.update, - ) - self.delete = to_streamed_response_wrapper( - child_agents.delete, - ) - self.add = to_streamed_response_wrapper( - child_agents.add, - ) - self.view = to_streamed_response_wrapper( - child_agents.view, - ) - - -class AsyncChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = async_to_streamed_response_wrapper( - child_agents.update, - ) - self.delete = async_to_streamed_response_wrapper( - child_agents.delete, - ) - self.add = async_to_streamed_response_wrapper( - child_agents.add, - ) - self.view = async_to_streamed_response_wrapper( - child_agents.view, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/functions.py b/src/digitalocean_genai_sdk/resources/agents/functions.py deleted file mode 100644 index 89f9efa3..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/functions.py +++ /dev/null @@ -1,421 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import function_create_params, function_update_params -from ...types.agents.function_create_response import FunctionCreateResponse -from ...types.agents.function_delete_response import FunctionDeleteResponse -from ...types.agents.function_update_response import FunctionUpdateResponse - -__all__ = ["FunctionsResource", "AsyncFunctionsResource"] - - -class FunctionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> FunctionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FunctionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FunctionsResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionCreateResponse: - """ - To create a function route for an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/functions`. 
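-
-        Example (an illustrative sketch; assumes a configured `client` that
-        exposes this resource as `client.agents.functions`, with placeholder
-        identifiers for the agent and the serverless function):
-
-            function_route = client.agents.functions.create(
-                "agent-uuid",
-                function_name="get-weather",
-                faas_name="default/get-weather",
-                faas_namespace="fn-namespace",
-            )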
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{path_agent_uuid}/functions", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_create_params.FunctionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionCreateResponse, - ) - - def update( - self, - path_function_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionUpdateResponse: - """ - To update the function route, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_function_uuid: - raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "body_function_uuid": body_function_uuid, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_update_params.FunctionUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionUpdateResponse, - ) - - def delete( - self, - function_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionDeleteResponse: - """ - To delete a function route from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not function_uuid: - raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") - return self._delete( - f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionDeleteResponse, - ) - - -class AsyncFunctionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFunctionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFunctionsResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionCreateResponse: - """ - To create a function route for an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/functions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{path_agent_uuid}/functions", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_create_params.FunctionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionCreateResponse, - ) - - async def update( - self, - path_function_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionUpdateResponse: - """ - To update the function route, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_function_uuid: - raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "body_function_uuid": body_function_uuid, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_update_params.FunctionUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionUpdateResponse, - ) - - async def delete( - self, - function_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionDeleteResponse: - """ - To delete a function route from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not function_uuid: - raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionDeleteResponse, - ) - - -class FunctionsResourceWithRawResponse: - def __init__(self, functions: FunctionsResource) -> None: - self._functions = functions - - self.create = to_raw_response_wrapper( - functions.create, - ) - self.update = to_raw_response_wrapper( - functions.update, - ) - self.delete = to_raw_response_wrapper( - functions.delete, - ) - - -class AsyncFunctionsResourceWithRawResponse: - def __init__(self, functions: AsyncFunctionsResource) -> None: - self._functions = functions - - self.create = async_to_raw_response_wrapper( - functions.create, - ) - self.update = async_to_raw_response_wrapper( - functions.update, - ) - self.delete = async_to_raw_response_wrapper( - functions.delete, - ) - - -class FunctionsResourceWithStreamingResponse: - def __init__(self, functions: FunctionsResource) -> None: - self._functions = functions - - self.create = to_streamed_response_wrapper( - functions.create, - ) - self.update = to_streamed_response_wrapper( - functions.update, - ) - self.delete = to_streamed_response_wrapper( - functions.delete, - ) - - -class AsyncFunctionsResourceWithStreamingResponse: - def __init__(self, functions: AsyncFunctionsResource) -> None: - self._functions = functions - - self.create = async_to_streamed_response_wrapper( - functions.create, - ) - self.update = async_to_streamed_response_wrapper( - functions.update, - ) - self.delete = async_to_streamed_response_wrapper( - functions.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py deleted file mode 100644 index 4a091446..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py +++ /dev/null @@ -1,346 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput -from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse - -__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] - - -class KnowledgeBasesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KnowledgeBasesResourceWithStreamingResponse(self) - - def attach( - self, - agent_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach knowledge bases to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - def attach_single( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach a knowledge base to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - def detach( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDetachResponse: - """ - To detach a knowledge base from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._delete( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDetachResponse, - ) - - -class AsyncKnowledgeBasesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKnowledgeBasesResourceWithStreamingResponse(self) - - async def attach( - self, - agent_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach knowledge bases to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - async def attach_single( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach a knowledge base to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - async def detach( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDetachResponse: - """ - To detach a knowledge base from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
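-
-        Example (an illustrative sketch; assumes a configured async client that
-        exposes this resource as `client.agents.knowledge_bases`, with
-        placeholder UUIDs):
-
-            response = await client.agents.knowledge_bases.detach(
-                "knowledge-base-uuid",
-                agent_uuid="agent-uuid",
-            )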
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDetachResponse, - ) - - -class KnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = to_raw_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = to_raw_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = to_raw_response_wrapper( - knowledge_bases.detach, - ) - - -class AsyncKnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = async_to_raw_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = async_to_raw_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = async_to_raw_response_wrapper( - knowledge_bases.detach, - ) - - -class KnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = to_streamed_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = to_streamed_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = to_streamed_response_wrapper( - knowledge_bases.detach, - ) - - -class AsyncKnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = async_to_streamed_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = async_to_streamed_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = async_to_streamed_response_wrapper( - knowledge_bases.detach, - ) diff --git a/src/digitalocean_genai_sdk/resources/api_keys/__init__.py b/src/digitalocean_genai_sdk/resources/api_keys/__init__.py deleted file mode 100644 index ed14565c..00000000 --- a/src/digitalocean_genai_sdk/resources/api_keys/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py deleted file mode 100644 index 63091bcc..00000000 --- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py +++ /dev/null @@ -1,275 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from . import api_keys_ as api_keys -from ...types import api_key_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_key_list_response import APIKeyListResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.APIKeysResource: - return api_keys.APIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. 
-
-          usecases: include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model may be used in an agent
-              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
-              - MODEL_USECASE_REASONING: The model may be used for reasoning
-              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return self._get(
-            "/v2/genai/models",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    api_key_list_params.APIKeyListParams,
-                ),
-            ),
-            cast_to=APIKeyListResponse,
-        )
-
-
-class AsyncAPIKeysResource(AsyncAPIResource):
-    @cached_property
-    def api_keys(self) -> api_keys.AsyncAPIKeysResource:
-        return api_keys.AsyncAPIKeysResource(self._client)
-
-    @cached_property
-    def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncAPIKeysResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncAPIKeysResourceWithStreamingResponse(self)
-
-    async def list(
-        self,
-        *,
-        page: int | NotGiven = NOT_GIVEN,
-        per_page: int | NotGiven = NOT_GIVEN,
-        public_only: bool | NotGiven = NOT_GIVEN,
-        usecases: List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-        | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> APIKeyListResponse:
-        """
-        To list all models, send a GET request to `/v2/gen-ai/models`.
-
-        Args:
-          page: page number.
-
-          per_page: items per page.
-
-          public_only: only include models that are publicly available.
-
-          usecases: include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model may be used in an agent
-              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
-              - MODEL_USECASE_REASONING: The model may be used for reasoning
-              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return await self._get(
-            "/v2/genai/models",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=await async_maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    api_key_list_params.APIKeyListParams,
-                ),
-            ),
-            cast_to=APIKeyListResponse,
-        )
-
-
-class APIKeysResourceWithRawResponse:
-    def __init__(self, api_keys: APIKeysResource) -> None:
-        self._api_keys = api_keys
-
-        self.list = to_raw_response_wrapper(
-            api_keys.list,
-        )
-
-    @cached_property
-    def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse:
-        return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys)
-
-
-class AsyncAPIKeysResourceWithRawResponse:
-    def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
-        self._api_keys = api_keys
-
-        self.list = async_to_raw_response_wrapper(
-            api_keys.list,
-        )
-
-    @cached_property
-    def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse:
-        return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys)
-
-
-class APIKeysResourceWithStreamingResponse:
-    def __init__(self, api_keys: APIKeysResource) -> None:
-        self._api_keys = api_keys
-
-        self.list = to_streamed_response_wrapper(
-            api_keys.list,
-        )
-
-    @cached_property
-    def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse:
-        return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys)
-
-
-class AsyncAPIKeysResourceWithStreamingResponse:
-    def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
-        self._api_keys = api_keys
-
-        self.list = async_to_streamed_response_wrapper(
-            api_keys.list,
-        )
-
-    @cached_property
-    def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse:
-        return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys)
diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py
deleted file mode 100644
index 70b1147a..00000000
--- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py
+++ /dev/null
@@ -1,529 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
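# Editor's sketch (not part of the patch): listing models through the resource
# removed above. The client class name and the `api_keys` mount point mirror
# the generated code being deleted; both are unverified assumptions.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

# GET /v2/gen-ai/models, filtered to public models usable by agents or as
# knowledge-base embedding models.
models = client.api_keys.list(
    page=1,
    per_page=25,
    public_only=True,
    usecases=["MODEL_USECASE_AGENT", "MODEL_USECASE_KNOWLEDGEBASE"],
)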
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.api_keys.api_key_list_response import APIKeyListResponse -from ...types.api_keys.api_key_create_response import APIKeyCreateResponse -from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse -from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse -from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def create( - self, - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/models/api_keys", - body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/models/api_keys/{path_api_key_uuid}", - body=maybe_transform( - { - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/models/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for a model, send a DELETE request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
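# Editor's sketch (not part of the patch): the model API key lifecycle this
# resource exposes. The client class, the nested `api_keys.api_keys` mount
# point, and the placeholder UUID are assumptions; in practice the UUID would
# come from the create response.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

created = client.api_keys.api_keys.create(name="ci-key")      # POST   /v2/gen-ai/models/api_keys
client.api_keys.api_keys.update("key-uuid", name="ci-key-2")  # PUT    /v2/gen-ai/models/api_keys/{api_key_uuid}
keys = client.api_keys.api_keys.list(page=1, per_page=20)     # GET    /v2/gen-ai/models/api_keys
client.api_keys.api_keys.delete("key-uuid")                   # DELETE /v2/gen-ai/models/api_keys/{api_key_uuid}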
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/models/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - def update_regenerate( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateRegenerateResponse: - """ - To regenerate a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._put( - f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateRegenerateResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/models/api_keys", - body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/models/api_keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/models/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for a model, send a DELETE request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/models/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - async def update_regenerate( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateRegenerateResponse: - """ - To regenerate a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._put( - f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateRegenerateResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_raw_response_wrapper( - api_keys.create, - ) - self.update = to_raw_response_wrapper( - api_keys.update, - ) - self.list = to_raw_response_wrapper( - api_keys.list, - ) - self.delete = to_raw_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = to_raw_response_wrapper( - api_keys.update_regenerate, - ) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_raw_response_wrapper( - api_keys.create, - ) - self.update = async_to_raw_response_wrapper( - api_keys.update, - ) - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = async_to_raw_response_wrapper( - api_keys.update_regenerate, - ) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_streamed_response_wrapper( - api_keys.create, - ) - self.update = to_streamed_response_wrapper( - 
api_keys.update, - ) - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = to_streamed_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = to_streamed_response_wrapper( - api_keys.update_regenerate, - ) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_streamed_response_wrapper( - api_keys.create, - ) - self.update = async_to_streamed_response_wrapper( - api_keys.update, - ) - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = async_to_streamed_response_wrapper( - api_keys.update_regenerate, - ) diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py b/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py deleted file mode 100644 index 2972198f..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .token import ( - TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = [ - "TokenResource", - "AsyncTokenResource", - "TokenResourceWithRawResponse", - "AsyncTokenResourceWithRawResponse", - "TokenResourceWithStreamingResponse", - "AsyncTokenResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/token.py b/src/digitalocean_genai_sdk/resources/auth/agents/token.py deleted file mode 100644 index 73ecef05..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/agents/token.py +++ /dev/null @@ -1,173 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.auth.agents import token_create_params -from ....types.auth.agents.token_create_response import TokenCreateResponse - -__all__ = ["TokenResource", "AsyncTokenResource"] - - -class TokenResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> TokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return TokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return TokenResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/auth/agents/{path_agent_uuid}/token", - body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class AsyncTokenResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncTokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncTokenResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. 
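# Editor's sketch (not part of the patch): issuing an agent access token with
# the async variant shown here. `AsyncDigitaloceanGenaiSDK` and the
# `auth.agents.token` mount point are inferred from the file layout and are
# assumptions, not a verified public API.
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    # POST /v2/gen-ai/auth/agents/{agent_uuid}/token
    token = await client.auth.agents.token.create("agent-uuid")
    print(token)


asyncio.run(main())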
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/auth/agents/{path_agent_uuid}/token", - body=await async_maybe_transform( - {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class TokenResourceWithRawResponse: - def __init__(self, token: TokenResource) -> None: - self._token = token - - self.create = to_raw_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithRawResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_raw_response_wrapper( - token.create, - ) - - -class TokenResourceWithStreamingResponse: - def __init__(self, token: TokenResource) -> None: - self._token = token - - self.create = to_streamed_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithStreamingResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_streamed_response_wrapper( - token.create, - ) diff --git a/src/digitalocean_genai_sdk/resources/auth/auth.py b/src/digitalocean_genai_sdk/resources/auth/auth.py deleted file mode 100644 index 985fc56c..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/auth.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .agents.agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = ["AuthResource", "AsyncAuthResource"] - - -class AuthResource(SyncAPIResource): - @cached_property - def agents(self) -> AgentsResource: - return AgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AuthResourceWithStreamingResponse(self) - - -class AsyncAuthResource(AsyncAPIResource): - @cached_property - def agents(self) -> AsyncAgentsResource: - return AsyncAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAuthResourceWithStreamingResponse(self) - - -class AuthResourceWithRawResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithRawResponse: - return AgentsResourceWithRawResponse(self._auth.agents) - - -class AsyncAuthResourceWithRawResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AsyncAgentsResourceWithRawResponse: - return AsyncAgentsResourceWithRawResponse(self._auth.agents) - - -class AuthResourceWithStreamingResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithStreamingResponse: - return AgentsResourceWithStreamingResponse(self._auth.agents) - - -class AsyncAuthResourceWithStreamingResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AsyncAgentsResourceWithStreamingResponse: - return AsyncAgentsResourceWithStreamingResponse(self._auth.agents) diff --git a/src/digitalocean_genai_sdk/resources/indexing_jobs.py b/src/digitalocean_genai_sdk/resources/indexing_jobs.py deleted file mode 100644 index 7649a7a7..00000000 --- a/src/digitalocean_genai_sdk/resources/indexing_jobs.py +++ /dev/null @@ -1,543 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List - -import httpx - -from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.indexing_job_list_response import IndexingJobListResponse -from ..types.indexing_job_create_response import IndexingJobCreateResponse -from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse - -__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] - - -class IndexingJobsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return IndexingJobsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return IndexingJobsResourceWithStreamingResponse(self)
-
-    def create(
-        self,
-        *,
-        data_source_uuids: List[str] | NotGiven = NOT_GIVEN,
-        knowledge_base_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobCreateResponse:
-        """
-        To start an indexing job for a knowledge base, send a POST request to
-        `/v2/gen-ai/indexing_jobs`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return self._post(
-            "/v2/genai/indexing_jobs",
-            body=maybe_transform(
-                {
-                    "data_source_uuids": data_source_uuids,
-                    "knowledge_base_uuid": knowledge_base_uuid,
-                },
-                indexing_job_create_params.IndexingJobCreateParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobCreateResponse,
-        )
-
-    def retrieve(
-        self,
-        uuid: str,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobRetrieveResponse:
-        """
-        To get the status of an indexing job for a knowledge base, send a GET request to
-        `/v2/gen-ai/indexing_jobs/{uuid}`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not uuid:
-            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
-        return self._get(
-            f"/v2/genai/indexing_jobs/{uuid}",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobRetrieveResponse,
-        )
-
-    def list(
-        self,
-        *,
-        page: int | NotGiven = NOT_GIVEN,
-        per_page: int | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobListResponse:
-        """
-        To list all indexing jobs for a knowledge base, send a GET request to
-        `/v2/gen-ai/indexing_jobs`.
-
-        Args:
-          page: page number.
-
-          per_page: items per page.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return self._get(
-            "/v2/genai/indexing_jobs",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                    },
-                    indexing_job_list_params.IndexingJobListParams,
-                ),
-            ),
-            cast_to=IndexingJobListResponse,
-        )
-
-    def retrieve_data_sources(
-        self,
-        indexing_job_uuid: str,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobRetrieveDataSourcesResponse:
-        """
-        To list all data sources for an indexing job, send a GET request to
-        `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not indexing_job_uuid:
-            raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
-        return self._get(
-            f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobRetrieveDataSourcesResponse,
-        )
-
-    def update_cancel(
-        self,
-        path_uuid: str,
-        *,
-        body_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobUpdateCancelResponse:
-        """
-        To cancel an indexing job for a knowledge base, send a PUT request to
-        `/v2/gen-ai/indexing_jobs/{uuid}/cancel`.
-
-        Args:
-          body_uuid: A unique identifier for an indexing job.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not path_uuid:
-            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
-        return self._put(
-            f"/v2/genai/indexing_jobs/{path_uuid}/cancel",
-            body=maybe_transform(
-                {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobUpdateCancelResponse,
-        )
-
-
-class AsyncIndexingJobsResource(AsyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncIndexingJobsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncIndexingJobsResourceWithStreamingResponse(self)
-
-    async def create(
-        self,
-        *,
-        data_source_uuids: List[str] | NotGiven = NOT_GIVEN,
-        knowledge_base_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobCreateResponse:
-        """
-        To start an indexing job for a knowledge base, send a POST request to
-        `/v2/gen-ai/indexing_jobs`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return await self._post(
-            "/v2/genai/indexing_jobs",
-            body=await async_maybe_transform(
-                {
-                    "data_source_uuids": data_source_uuids,
-                    "knowledge_base_uuid": knowledge_base_uuid,
-                },
-                indexing_job_create_params.IndexingJobCreateParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobCreateResponse,
-        )
-
-    async def retrieve(
-        self,
-        uuid: str,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobRetrieveResponse:
-        """
-        To get the status of an indexing job for a knowledge base, send a GET request to
-        `/v2/gen-ai/indexing_jobs/{uuid}`.
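# Editor's sketch (not part of the patch): the indexing-job flow these methods
# cover. The client class and the placeholder UUIDs are assumptions; real
# values would come from the create response and the knowledge base.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

job = client.indexing_jobs.create(                     # POST /v2/gen-ai/indexing_jobs
    knowledge_base_uuid="kb-uuid",
    data_source_uuids=["ds-uuid"],
)
status = client.indexing_jobs.retrieve("job-uuid")     # GET  /v2/gen-ai/indexing_jobs/{uuid}
sources = client.indexing_jobs.retrieve_data_sources(  # GET  /v2/gen-ai/indexing_jobs/{uuid}/data_sources
    "job-uuid",
)
client.indexing_jobs.update_cancel("job-uuid")         # PUT  /v2/gen-ai/indexing_jobs/{uuid}/cancel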
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not uuid:
-            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
-        return await self._get(
-            f"/v2/genai/indexing_jobs/{uuid}",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobRetrieveResponse,
-        )
-
-    async def list(
-        self,
-        *,
-        page: int | NotGiven = NOT_GIVEN,
-        per_page: int | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobListResponse:
-        """
-        To list all indexing jobs for a knowledge base, send a GET request to
-        `/v2/gen-ai/indexing_jobs`.
-
-        Args:
-          page: page number.
-
-          per_page: items per page.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return await self._get(
-            "/v2/genai/indexing_jobs",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=await async_maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                    },
-                    indexing_job_list_params.IndexingJobListParams,
-                ),
-            ),
-            cast_to=IndexingJobListResponse,
-        )
-
-    async def retrieve_data_sources(
-        self,
-        indexing_job_uuid: str,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> IndexingJobRetrieveDataSourcesResponse:
-        """
-        To list all data sources for an indexing job, send a GET request to
-        `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not indexing_job_uuid:
-            raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
-        return await self._get(
-            f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=IndexingJobRetrieveDataSourcesResponse,
-        )
-
-    async def update_cancel(
-        self,
-        path_uuid: str,
-        *,
-        body_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobUpdateCancelResponse: - """ - To cancel an indexing job for a knowledge base, send a PUT request to - `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. - - Args: - body_uuid: A unique identifier for an indexing job. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/indexing_jobs/{path_uuid}/cancel", - body=await async_maybe_transform( - {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobUpdateCancelResponse, - ) - - -class IndexingJobsResourceWithRawResponse: - def __init__(self, indexing_jobs: IndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = to_raw_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = to_raw_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = to_raw_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = to_raw_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = to_raw_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class AsyncIndexingJobsResourceWithRawResponse: - def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = async_to_raw_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = async_to_raw_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = async_to_raw_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = async_to_raw_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = async_to_raw_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class IndexingJobsResourceWithStreamingResponse: - def __init__(self, indexing_jobs: IndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = to_streamed_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = to_streamed_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = to_streamed_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = to_streamed_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = to_streamed_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class AsyncIndexingJobsResourceWithStreamingResponse: - def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = async_to_streamed_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = async_to_streamed_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = async_to_streamed_response_wrapper( - 
indexing_jobs.update_cancel, - ) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py deleted file mode 100644 index 03d143e2..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .data_sources import ( - DataSourcesResource, - AsyncDataSourcesResource, - DataSourcesResourceWithRawResponse, - AsyncDataSourcesResourceWithRawResponse, - DataSourcesResourceWithStreamingResponse, - AsyncDataSourcesResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "DataSourcesResource", - "AsyncDataSourcesResource", - "DataSourcesResourceWithRawResponse", - "AsyncDataSourcesResourceWithRawResponse", - "DataSourcesResourceWithStreamingResponse", - "AsyncDataSourcesResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py deleted file mode 100644 index b8a29c4a..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py +++ /dev/null @@ -1,410 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.knowledge_bases import ( - data_source_list_params, - data_source_create_params, -) -from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse -from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse -from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse -from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam -from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] - - -class DataSourcesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DataSourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return DataSourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return DataSourcesResourceWithStreamingResponse(self) - - def create( - self, - path_knowledge_base_uuid: str, - *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceCreateResponse: - """ - To add a data source to a knowledge base, send a POST request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" - ) - return self._post( - f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", - body=maybe_transform( - { - "aws_data_source": aws_data_source, - "body_knowledge_base_uuid": body_knowledge_base_uuid, - "spaces_data_source": spaces_data_source, - "web_crawler_data_source": web_crawler_data_source, - }, - data_source_create_params.DataSourceCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceCreateResponse, - ) - - def list( - self, - knowledge_base_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceListResponse: - """ - To list all data sources for a knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - page: page number. - - per_page: items per page. 
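# A hedged sketch of `data_sources.create` as defined above, using a sync
# client. The class name `DigitaloceanGenaiSDK` and the web-crawler payload
# field `base_url` are assumptions, not confirmed by this patch.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
# POST /v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources
created = client.knowledge_bases.data_sources.create(
    "kb-uuid",  # path_knowledge_base_uuid (placeholder)
    web_crawler_data_source={"base_url": "https://example.com/docs"},
)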
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._get( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - data_source_list_params.DataSourceListParams, - ), - ), - cast_to=DataSourceListResponse, - ) - - def delete( - self, - data_source_uuid: str, - *, - knowledge_base_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceDeleteResponse: - """ - To delete a data source from a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - if not data_source_uuid: - raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") - return self._delete( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceDeleteResponse, - ) - - -class AsyncDataSourcesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDataSourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
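# Listing and deleting data sources with the sync methods above; the client
# class name is assumed and the UUIDs are placeholders.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
page = client.knowledge_bases.data_sources.list("kb-uuid", page=1, per_page=10)
deleted = client.knowledge_bases.data_sources.delete(
    "ds-uuid",                      # data_source_uuid path parameter
    knowledge_base_uuid="kb-uuid",  # parent knowledge base path parameter
)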
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncDataSourcesResourceWithStreamingResponse(self) - - async def create( - self, - path_knowledge_base_uuid: str, - *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceCreateResponse: - """ - To add a data source to a knowledge base, send a POST request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" - ) - return await self._post( - f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", - body=await async_maybe_transform( - { - "aws_data_source": aws_data_source, - "body_knowledge_base_uuid": body_knowledge_base_uuid, - "spaces_data_source": spaces_data_source, - "web_crawler_data_source": web_crawler_data_source, - }, - data_source_create_params.DataSourceCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceCreateResponse, - ) - - async def list( - self, - knowledge_base_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceListResponse: - """ - To list all data sources for a knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._get( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - data_source_list_params.DataSourceListParams, - ), - ), - cast_to=DataSourceListResponse, - ) - - async def delete( - self, - data_source_uuid: str, - *, - knowledge_base_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceDeleteResponse: - """ - To delete a data source from a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - if not data_source_uuid: - raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") - return await self._delete( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceDeleteResponse, - ) - - -class DataSourcesResourceWithRawResponse: - def __init__(self, data_sources: DataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = to_raw_response_wrapper( - data_sources.create, - ) - self.list = to_raw_response_wrapper( - data_sources.list, - ) - self.delete = to_raw_response_wrapper( - data_sources.delete, - ) - - -class AsyncDataSourcesResourceWithRawResponse: - def __init__(self, data_sources: AsyncDataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = async_to_raw_response_wrapper( - data_sources.create, - ) - self.list = async_to_raw_response_wrapper( - data_sources.list, - ) - self.delete = async_to_raw_response_wrapper( - data_sources.delete, - ) - - -class DataSourcesResourceWithStreamingResponse: - def __init__(self, data_sources: DataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = to_streamed_response_wrapper( - data_sources.create, - ) - self.list = to_streamed_response_wrapper( - data_sources.list, - ) - self.delete = to_streamed_response_wrapper( - data_sources.delete, - ) - - -class AsyncDataSourcesResourceWithStreamingResponse: - def __init__(self, data_sources: AsyncDataSourcesResource) -> 
None: - self._data_sources = data_sources - - self.create = async_to_streamed_response_wrapper( - data_sources.create, - ) - self.list = async_to_streamed_response_wrapper( - data_sources.list, - ) - self.delete = async_to_streamed_response_wrapper( - data_sources.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py deleted file mode 100644 index 713aca63..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py +++ /dev/null @@ -1,667 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable - -import httpx - -from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .data_sources import ( - DataSourcesResource, - AsyncDataSourcesResource, - DataSourcesResourceWithRawResponse, - AsyncDataSourcesResourceWithRawResponse, - DataSourcesResourceWithStreamingResponse, - AsyncDataSourcesResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from ...types.knowledge_base_list_response import KnowledgeBaseListResponse -from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse -from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse -from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse -from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse - -__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] - - -class KnowledgeBasesResource(SyncAPIResource): - @cached_property - def data_sources(self) -> DataSourcesResource: - return DataSourcesResource(self._client) - - @cached_property - def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KnowledgeBasesResourceWithStreamingResponse(self) - - def create( - self, - *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
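# The async data-source methods mirror the sync calls; a brief sketch with the
# assumed `AsyncDigitaloceanGenaiSDK` client and placeholder UUIDs:
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    # DELETE /v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}
    await client.knowledge_bases.data_sources.delete(
        "ds-uuid", knowledge_base_uuid="kb-uuid"
    )


asyncio.run(main())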
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseCreateResponse: - """ - To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. - - Args: - database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - - datasources: The data sources to use for this knowledge base. See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data sources best practices. - - embedding_model_uuid: Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). - - name: Name of the knowledge base. - - project_id: Identifier of the DigitalOcean project this knowledge base will belong to. - - region: The datacenter region to deploy the knowledge base in. - - tags: Tags to organize your knowledge base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/knowledge_bases", - body=maybe_transform( - { - "database_id": database_id, - "datasources": datasources, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "region": region, - "tags": tags, - "vpc_uuid": vpc_uuid, - }, - knowledge_base_create_params.KnowledgeBaseCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseCreateResponse, - ) - - def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseRetrieveResponse: - """ - To retrieve information about an existing knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{uuid}`.
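# Creating and then fetching a knowledge base with the methods above. The
# client class name and the region slug are assumptions; every argument shown
# is optional in the generated signature.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
# POST /v2/genai/knowledge_bases
kb = client.knowledge_bases.create(name="product-docs", region="nyc3")
# GET /v2/genai/knowledge_bases/{uuid}
retrieved = client.knowledge_bases.retrieve("kb-uuid")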
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseRetrieveResponse, - ) - - def update( - self, - path_uuid: str, - *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseUpdateResponse: - """ - To update a knowledge base, send a PUT request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - database_id: The ID of the DigitalOcean database this knowledge base will use, optional. - - embedding_model_uuid: Identifier for the foundation model. - - tags: Tags to organize your knowledge base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/knowledge_bases/{path_uuid}", - body=maybe_transform( - { - "database_id": database_id, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "tags": tags, - "body_uuid": body_uuid, - }, - knowledge_base_update_params.KnowledgeBaseUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseListResponse: - """ - To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. - - Args: - page: page number. - - per_page: items per page.
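# Updating and paging through knowledge bases; placeholder UUID, assumed client
# class name. `path_uuid` fills the URL while `body_uuid` (omitted here) would
# be sent in the request body.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
updated = client.knowledge_bases.update("kb-uuid", name="renamed-kb", tags=["docs"])
page_one = client.knowledge_bases.list(page=1, per_page=25)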
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - knowledge_base_list_params.KnowledgeBaseListParams, - ), - ), - cast_to=KnowledgeBaseListResponse, - ) - - def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDeleteResponse: - """ - To delete a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._delete( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDeleteResponse, - ) - - -class AsyncKnowledgeBasesResource(AsyncAPIResource): - @cached_property - def data_sources(self) -> AsyncDataSourcesResource: - return AsyncDataSourcesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKnowledgeBasesResourceWithStreamingResponse(self) - - async def create( - self, - *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseCreateResponse: - """ - To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. - - Args: - database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - - datasources: The data sources to use for this knowledge base. See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data sources best practices. - - embedding_model_uuid: Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). - - name: Name of the knowledge base. - - project_id: Identifier of the DigitalOcean project this knowledge base will belong to. - - region: The datacenter region to deploy the knowledge base in. - - tags: Tags to organize your knowledge base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/knowledge_bases", - body=await async_maybe_transform( - { - "database_id": database_id, - "datasources": datasources, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "region": region, - "tags": tags, - "vpc_uuid": vpc_uuid, - }, - knowledge_base_create_params.KnowledgeBaseCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseCreateResponse, - ) - - async def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseRetrieveResponse: - """ - To retrieve information about an existing knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{uuid}`.
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseRetrieveResponse, - ) - - async def update( - self, - path_uuid: str, - *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseUpdateResponse: - """ - To update a knowledge base, send a PUT request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - database_id: The ID of the DigitalOcean database this knowledge base will use, optional. - - embedding_model_uuid: Identifier for the foundation model. - - tags: Tags to organize your knowledge base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/knowledge_bases/{path_uuid}", - body=await async_maybe_transform( - { - "database_id": database_id, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "tags": tags, - "body_uuid": body_uuid, - }, - knowledge_base_update_params.KnowledgeBaseUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseListResponse: - """ - To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. - - Args: - page: page number. - - per_page: items per page.
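# An async sketch of listing and deleting knowledge bases (client class name
# assumed, UUID a placeholder):
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK()
    kbs = await client.knowledge_bases.list(per_page=50)
    await client.knowledge_bases.delete("kb-uuid")


asyncio.run(main())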
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - knowledge_base_list_params.KnowledgeBaseListParams, - ), - ), - cast_to=KnowledgeBaseListResponse, - ) - - async def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDeleteResponse: - """ - To delete a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._delete( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDeleteResponse, - ) - - -class KnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = to_raw_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = to_raw_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = to_raw_response_wrapper( - knowledge_bases.update, - ) - self.list = to_raw_response_wrapper( - knowledge_bases.list, - ) - self.delete = to_raw_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> DataSourcesResourceWithRawResponse: - return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - - -class AsyncKnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = async_to_raw_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = async_to_raw_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = async_to_raw_response_wrapper( - knowledge_bases.update, - ) - self.list = async_to_raw_response_wrapper( - knowledge_bases.list, - ) - self.delete = async_to_raw_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: - return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - - -class KnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = to_streamed_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = to_streamed_response_wrapper( - 
knowledge_bases.retrieve, - ) - self.update = to_streamed_response_wrapper( - knowledge_bases.update, - ) - self.list = to_streamed_response_wrapper( - knowledge_bases.list, - ) - self.delete = to_streamed_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> DataSourcesResourceWithStreamingResponse: - return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) - - -class AsyncKnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = async_to_streamed_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - knowledge_bases.update, - ) - self.list = async_to_streamed_response_wrapper( - knowledge_bases.list, - ) - self.delete = async_to_streamed_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: - return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) diff --git a/src/digitalocean_genai_sdk/resources/providers/__init__.py b/src/digitalocean_genai_sdk/resources/providers/__init__.py deleted file mode 100644 index 1731e057..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) - -__all__ = [ - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py deleted file mode 100644 index 057a3a2f..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py deleted file mode 100644 index 64783563..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["AnthropicResource", "AsyncAnthropicResource"] - - -class AnthropicResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AnthropicResourceWithStreamingResponse(self) - - -class AsyncAnthropicResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAnthropicResourceWithStreamingResponse(self) - - -class AnthropicResourceWithRawResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithRawResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._anthropic.keys) - - -class AnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py deleted file mode 100644 index 1f65a5ab..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py +++ /dev/null @@ -1,662 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params -from ....types.providers.anthropic.key_list_response import KeyListResponse -from ....types.providers.anthropic.key_create_response import KeyCreateResponse -from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse -from ....types.providers.anthropic.key_update_response import KeyUpdateResponse -from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/anthropic/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
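# A sketch of creating and inspecting an Anthropic provider key. The accessor
# chain `client.providers.anthropic.keys` is inferred from this module layout,
# the client class name is assumed, and the key value is a placeholder.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
# POST /v2/genai/anthropic/keys
key = client.providers.anthropic.keys.create(
    api_key="example-anthropic-api-key", name="production"
)
# GET /v2/genai/anthropic/keys/{api_key_uuid}
detail = client.providers.anthropic.keys.retrieve("key-uuid")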
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/anthropic/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
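# Paging keys, deleting one, and listing the agents bound to a key; UUIDs are
# placeholders and the accessor chain is assumed as above.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()
keys = client.providers.anthropic.keys.list(page=1, per_page=10)
client.providers.anthropic.keys.delete("key-uuid")
agents = client.providers.anthropic.keys.list_agents("key-uuid", per_page=10)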
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/anthropic/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/anthropic/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = to_raw_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_raw_response_wrapper( - keys.list_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = to_streamed_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_streamed_response_wrapper( - keys.list_agents, - ) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py b/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py deleted file mode 100644 index 66d8ca7a..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
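For context, a minimal sketch of how the Anthropic `KeysResource` above is driven from client code. The `DigitaloceanGenaiSDK` client class name, its `api_key` constructor argument, and the `providers.anthropic.keys` accessor path are assumptions inferred from the resource and wrapper classes in this diff, not confirmed elsewhere in the patch:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # client class name assumed

client = DigitaloceanGenaiSDK(api_key="do-api-token")  # auth kwarg is an assumption

# POST /v2/genai/anthropic/keys
created = client.providers.anthropic.keys.create(api_key="sk-ant-...", name="staging")

# GET /v2/genai/anthropic/keys (paginated)
keys = client.providers.anthropic.keys.list(page=1, per_page=25)

# GET /v2/genai/anthropic/keys/{uuid}/agents
agents = client.providers.anthropic.keys.list_agents("key-uuid", page=1, per_page=25)
```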
- -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py deleted file mode 100644 index 06e7a23c..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py +++ /dev/null @@ -1,658 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params -from ....types.providers.openai.key_list_response import KeyListResponse -from ....types.providers.openai.key_create_response import KeyCreateResponse -from ....types.providers.openai.key_delete_response import KeyDeleteResponse -from ....types.providers.openai.key_update_response import KeyUpdateResponse -from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/openai/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/openai/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/openai/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/openai/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_streamed_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_streamed_response_wrapper( - keys.retrieve_agents, - ) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py deleted file mode 100644 index d29fd062..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
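The async resource mirrors the sync one method-for-method, awaiting each call. A sketch, assuming an `AsyncDigitaloceanGenaiSDK` client class and the same accessor path as above:

```python
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK  # async client name assumed


async def main() -> None:
    client = AsyncDigitaloceanGenaiSDK(api_key="do-api-token")  # auth kwarg is an assumption

    # GET /v2/genai/openai/keys
    keys = await client.providers.openai.keys.list(page=1, per_page=10)

    # Note the naming asymmetry: `retrieve_agents` here, `list_agents` on the
    # Anthropic resource, for the same GET .../{uuid}/agents shape.
    agents = await client.providers.openai.keys.retrieve_agents("key-uuid", per_page=10)


asyncio.run(main())
```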
- -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["OpenAIResource", "AsyncOpenAIResource"] - - -class OpenAIResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> OpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return OpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return OpenAIResourceWithStreamingResponse(self) - - -class AsyncOpenAIResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncOpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncOpenAIResourceWithStreamingResponse(self) - - -class OpenAIResourceWithRawResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithRawResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._openai.keys) - - -class OpenAIResourceWithStreamingResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithStreamingResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/digitalocean_genai_sdk/resources/providers/providers.py b/src/digitalocean_genai_sdk/resources/providers/providers.py deleted file mode 100644 index 50e3db1a..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/providers.py +++ /dev/null @@ -1,134 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .openai.openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic.anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = ["ProvidersResource", "AsyncProvidersResource"] - - -class ProvidersResource(SyncAPIResource): - @cached_property - def anthropic(self) -> AnthropicResource: - return AnthropicResource(self._client) - - @cached_property - def openai(self) -> OpenAIResource: - return OpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> ProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ProvidersResourceWithStreamingResponse(self) - - -class AsyncProvidersResource(AsyncAPIResource): - @cached_property - def anthropic(self) -> AsyncAnthropicResource: - return AsyncAnthropicResource(self._client) - - @cached_property - def openai(self) -> AsyncOpenAIResource: - return AsyncOpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncProvidersResourceWithStreamingResponse(self) - - -class ProvidersResourceWithRawResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithRawResponse: - return AnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithRawResponse: - return OpenAIResourceWithRawResponse(self._providers.openai) - - -class AsyncProvidersResourceWithRawResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: - return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithRawResponse: - return AsyncOpenAIResourceWithRawResponse(self._providers.openai) - - -class ProvidersResourceWithStreamingResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithStreamingResponse: - return AnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithStreamingResponse: - return OpenAIResourceWithStreamingResponse(self._providers.openai) - - -class AsyncProvidersResourceWithStreamingResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: - return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: - return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/digitalocean_genai_sdk/resources/regions.py b/src/digitalocean_genai_sdk/resources/regions.py deleted file mode 100644 index d506688b..00000000 --- a/src/digitalocean_genai_sdk/resources/regions.py +++ /dev/null @@ -1,191 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
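The `*WithRawResponse` and `*WithStreamingResponse` classes above rewrap every resource method without changing its signature. A sketch of both access patterns, assuming the `.parse()`/headers surface these Stainless-style wrappers conventionally expose:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # client class name assumed

client = DigitaloceanGenaiSDK(api_key="do-api-token")  # auth kwarg is an assumption

# Raw access: the HTTP response object comes back and parsing becomes explicit.
raw = client.providers.openai.keys.with_raw_response.list()
print(raw.headers)      # response headers, per the wrapper docstrings above
key_list = raw.parse()  # parsed KeyListResponse

# Streaming access: the body is not read eagerly, only when parsed or iterated.
with client.providers.openai.keys.with_streaming_response.list() as streamed:
    key_list = streamed.parse()
```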
- -from __future__ import annotations - -import httpx - -from ..types import region_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.region_list_response import RegionListResponse - -__all__ = ["RegionsResource", "AsyncRegionsResource"] - - -class RegionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> RegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return RegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return RegionsResourceWithStreamingResponse(self) - - def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - -class AsyncRegionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncRegionsResourceWithStreamingResponse(self) - - async def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - -class RegionsResourceWithRawResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_raw_response_wrapper( - regions.list, - ) - - -class AsyncRegionsResourceWithRawResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_raw_response_wrapper( - regions.list, - ) - - -class RegionsResourceWithStreamingResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_streamed_response_wrapper( - regions.list, - ) - - -class AsyncRegionsResourceWithStreamingResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_streamed_response_wrapper( - regions.list, - ) diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py deleted file mode 100644 index ee516f83..00000000 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
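A sketch of the regions resource above; only the top-level `client.regions` accessor is an assumption, while the filter arguments come straight from `RegionListParams`:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK  # client class name assumed

client = DigitaloceanGenaiSDK(api_key="do-api-token")  # auth kwarg is an assumption

# GET /v2/genai/regions -- both flags map directly onto query parameters.
regions = client.regions.list(serves_inference=True, serves_batch=False)
```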
- -from __future__ import annotations - -from .model import Model as Model -from .api_agent import APIAgent as APIAgent -from .api_model import APIModel as APIModel -from .api_agreement import APIAgreement as APIAgreement -from .api_indexing_job import APIIndexingJob as APIIndexingJob -from .agent_list_params import AgentListParams as AgentListParams -from .api_model_version import APIModelVersion as APIModelVersion -from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase -from .region_list_params import RegionListParams as RegionListParams -from .agent_create_params import AgentCreateParams as AgentCreateParams -from .agent_list_response import AgentListResponse as AgentListResponse -from .agent_update_params import AgentUpdateParams as AgentUpdateParams -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .model_list_response import ModelListResponse as ModelListResponse -from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod -from .region_list_response import RegionListResponse as RegionListResponse -from .agent_create_response import AgentCreateResponse as AgentCreateResponse -from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse -from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams -from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse -from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse -from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse -from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams -from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse -from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob -from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse -from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse -from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse -from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse 
-from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse -from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) -from .chat_completion_request_message_content_part_text_param import ( - ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, -) diff --git a/src/digitalocean_genai_sdk/types/agent_create_params.py b/src/digitalocean_genai_sdk/types/agent_create_params.py deleted file mode 100644 index 58b99df7..00000000 --- a/src/digitalocean_genai_sdk/types/agent_create_params.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["AgentCreateParams"] - - -class AgentCreateParams(TypedDict, total=False): - anthropic_key_uuid: str - - description: str - - instruction: str - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - knowledge_base_uuid: List[str] - - model_uuid: str - """Identifier for the foundation model.""" - - name: str - - openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - - project_id: str - - region: str - - tags: List[str] diff --git a/src/digitalocean_genai_sdk/types/agent_create_response.py b/src/digitalocean_genai_sdk/types/agent_create_response.py deleted file mode 100644 index 48545fe9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_create_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentCreateResponse"] - - -class AgentCreateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_delete_response.py b/src/digitalocean_genai_sdk/types/agent_delete_response.py deleted file mode 100644 index eb1d440d..00000000 --- a/src/digitalocean_genai_sdk/types/agent_delete_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentDeleteResponse"] - - -class AgentDeleteResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_list_params.py b/src/digitalocean_genai_sdk/types/agent_list_params.py deleted file mode 100644 index e13a10c9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_list_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["AgentListParams"] - - -class AgentListParams(TypedDict, total=False): - only_deployed: bool - """only list agents that are deployed.""" - - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/agent_list_response.py b/src/digitalocean_genai_sdk/types/agent_list_response.py deleted file mode 100644 index 4cedbb39..00000000 --- a/src/digitalocean_genai_sdk/types/agent_list_response.py +++ /dev/null @@ -1,198 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_model import APIModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_knowledge_base import APIKnowledgeBase -from .api_retrieval_method import APIRetrievalMethod -from .api_deployment_visibility import APIDeploymentVisibility - -__all__ = [ - "AgentListResponse", - "Agent", - "AgentChatbot", - "AgentChatbotIdentifier", - "AgentDeployment", - "AgentTemplate", - "AgentTemplateGuardrail", -] - - -class AgentChatbot(BaseModel): - button_background_color: Optional[str] = None - - logo: Optional[str] = None - - name: Optional[str] = None - - primary_color: Optional[str] = None - - secondary_color: Optional[str] = None - - starting_message: Optional[str] = None - - -class AgentChatbotIdentifier(BaseModel): - agent_chatbot_identifier: Optional[str] = None - - -class AgentDeployment(BaseModel): - created_at: Optional[datetime] = None - - name: Optional[str] = None - - status: Optional[ - Literal[ - "STATUS_UNKNOWN", - "STATUS_WAITING_FOR_DEPLOYMENT", - "STATUS_DEPLOYING", - "STATUS_RUNNING", - "STATUS_FAILED", - "STATUS_WAITING_FOR_UNDEPLOYMENT", - "STATUS_UNDEPLOYING", - "STATUS_UNDEPLOYMENT_FAILED", - "STATUS_DELETED", - ] - ] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - visibility: Optional[APIDeploymentVisibility] = None - - -class AgentTemplateGuardrail(BaseModel): - priority: Optional[int] = None - - uuid: Optional[str] = None - - -class AgentTemplate(BaseModel): - created_at: Optional[datetime] = None - - description: Optional[str] = None - - guardrails: Optional[List[AgentTemplateGuardrail]] = None - - instruction: Optional[str] = None - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - long_description: Optional[str] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - short_description: Optional[str] = None - - summary: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class Agent(BaseModel): - chatbot: Optional[AgentChatbot] = None - - chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None - - created_at: Optional[datetime] = None - - deployment: Optional[AgentDeployment] = None - - description: Optional[str] = None - - if_case: Optional[str] = None - - instruction: Optional[str] = None - """Agent instruction. - - Instructions help your agent to perform its job effectively. 
See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - k: Optional[int] = None - - max_tokens: Optional[int] = None - """ - Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - """ - - model: Optional[APIModel] = None - - name: Optional[str] = None - - project_id: Optional[str] = None - - provide_citations: Optional[bool] = None - - region: Optional[str] = None - - retrieval_method: Optional[APIRetrievalMethod] = None - - route_created_at: Optional[datetime] = None - - route_created_by: Optional[str] = None - - route_name: Optional[str] = None - - route_uuid: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - """Controls the model’s creativity, specified as a number between 0 and 1. - - Lower values produce more predictable and conservative responses, while higher - values encourage creativity and variation. - """ - - template: Optional[AgentTemplate] = None - - top_p: Optional[float] = None - """ - Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - """ - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentListResponse(BaseModel): - agents: Optional[List[Agent]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/agent_retrieve_response.py deleted file mode 100644 index 2eed88af..00000000 --- a/src/digitalocean_genai_sdk/types/agent_retrieve_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentRetrieveResponse"] - - -class AgentRetrieveResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_update_params.py b/src/digitalocean_genai_sdk/types/agent_update_params.py deleted file mode 100644 index 85f9a9c2..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_params.py +++ /dev/null @@ -1,65 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo -from .api_retrieval_method import APIRetrievalMethod - -__all__ = ["AgentUpdateParams"] - - -class AgentUpdateParams(TypedDict, total=False): - anthropic_key_uuid: str - - description: str - - instruction: str - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - k: int - - max_tokens: int - """ - Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. 
- """ - - model_uuid: str - """Identifier for the foundation model.""" - - name: str - - openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - - project_id: str - - provide_citations: bool - - retrieval_method: APIRetrievalMethod - - tags: List[str] - - temperature: float - """Controls the model’s creativity, specified as a number between 0 and 1. - - Lower values produce more predictable and conservative responses, while higher - values encourage creativity and variation. - """ - - top_p: float - """ - Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - """ - - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/digitalocean_genai_sdk/types/agent_update_response.py b/src/digitalocean_genai_sdk/types/agent_update_response.py deleted file mode 100644 index 2948aa1c..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentUpdateResponse"] - - -class AgentUpdateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_update_status_params.py b/src/digitalocean_genai_sdk/types/agent_update_status_params.py deleted file mode 100644 index a0cdc0b9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_status_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo -from .api_deployment_visibility import APIDeploymentVisibility - -__all__ = ["AgentUpdateStatusParams"] - - -class AgentUpdateStatusParams(TypedDict, total=False): - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - - visibility: APIDeploymentVisibility diff --git a/src/digitalocean_genai_sdk/types/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/agent_update_status_response.py deleted file mode 100644 index b200f99d..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_status_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentUpdateStatusResponse"] - - -class AgentUpdateStatusResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/__init__.py b/src/digitalocean_genai_sdk/types/agents/__init__.py deleted file mode 100644 index aae0ee6b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .api_meta import APIMeta as APIMeta -from .api_links import APILinks as APILinks -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .version_list_params import VersionListParams as VersionListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .version_list_response import VersionListResponse as VersionListResponse -from .version_update_params import VersionUpdateParams as VersionUpdateParams -from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams -from .function_create_params import FunctionCreateParams as FunctionCreateParams -from .function_update_params import FunctionUpdateParams as FunctionUpdateParams -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .version_update_response import VersionUpdateResponse as VersionUpdateResponse -from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse -from .function_create_response import FunctionCreateResponse as FunctionCreateResponse -from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse -from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse -from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams -from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse -from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse -from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse -from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse -from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput -from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py deleted file mode 100644 index c3fc44cd..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyCreateParams"] - - -class APIKeyCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py deleted file mode 100644 index 09689fe7..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyCreateResponse"] - - -class APIKeyCreateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py deleted file mode 100644 index 02b03f61..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyDeleteResponse"] - - -class APIKeyDeleteResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py deleted file mode 100644 index eff98649..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .api_meta import APIMeta -from ..._models import BaseModel -from .api_links import APILinks -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py deleted file mode 100644 index ea2f761e..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyRegenerateResponse"] - - -class APIKeyRegenerateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py deleted file mode 100644 index b49ebb38..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyUpdateParams"] - - -class APIKeyUpdateParams(TypedDict, total=False): - path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] - - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py deleted file mode 100644 index 87442329..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyUpdateResponse"] - - -class APIKeyUpdateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py deleted file mode 100644 index a38f021b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APILinkKnowledgeBaseOutput"] - - -class APILinkKnowledgeBaseOutput(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py deleted file mode 100644 index 001baa6f..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["ChildAgentAddParams"] - - -class ChildAgentAddParams(TypedDict, total=False): - path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] - - body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - - if_case: str - - body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] - """A unique identifier for the parent agent.""" - - route_name: str diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py deleted file mode 100644 index baccec10..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentAddResponse"] - - -class ChildAgentAddResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None - """A unique identifier for the parent agent.""" diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py deleted file mode 100644 index b50fb024..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentDeleteResponse"] - - -class ChildAgentDeleteResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py deleted file mode 100644 index 2f009a52..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["ChildAgentUpdateParams"] - - -class ChildAgentUpdateParams(TypedDict, total=False): - path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] - - body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - - if_case: str - - body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] - """A unique identifier for the parent agent.""" - - route_name: str - - uuid: str diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py deleted file mode 100644 index 48a13c72..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentUpdateResponse"] - - -class ChildAgentUpdateResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None - """A unique identifier for the parent agent.""" - - rollback: Optional[bool] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py deleted file mode 100644 index ffbaef12..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentViewResponse"] - - -class ChildAgentViewResponse(BaseModel): - children: Optional[List["APIAgent"]] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_create_params.py b/src/digitalocean_genai_sdk/types/agents/function_create_params.py deleted file mode 100644 index 938fb1d5..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_create_params.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["FunctionCreateParams"] - - -class FunctionCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - description: str - - faas_name: str - - faas_namespace: str - - function_name: str - - input_schema: object - - output_schema: object diff --git a/src/digitalocean_genai_sdk/types/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/agents/function_create_response.py deleted file mode 100644 index 82ab984b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_create_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionCreateResponse"] - - -class FunctionCreateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/agents/function_delete_response.py deleted file mode 100644 index 678ef62d..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_delete_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionDeleteResponse"] - - -class FunctionDeleteResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_update_params.py b/src/digitalocean_genai_sdk/types/agents/function_update_params.py deleted file mode 100644 index 2fa8e8f0..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_update_params.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["FunctionUpdateParams"] - - -class FunctionUpdateParams(TypedDict, total=False): - path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] - - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - description: str - - faas_name: str - - faas_namespace: str - - function_name: str - - body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] - - input_schema: object - - output_schema: object diff --git a/src/digitalocean_genai_sdk/types/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/agents/function_update_response.py deleted file mode 100644 index 82fc63be..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_update_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionUpdateResponse"] - - -class FunctionUpdateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py deleted file mode 100644 index 76bb4236..00000000 --- a/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["KnowledgeBaseDetachResponse"] - - -class KnowledgeBaseDetachResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/api_agent.py b/src/digitalocean_genai_sdk/types/api_agent.py deleted file mode 100644 index d6e18ca2..00000000 --- a/src/digitalocean_genai_sdk/types/api_agent.py +++ /dev/null @@ -1,263 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_model import APIModel -from .api_knowledge_base import APIKnowledgeBase -from .api_retrieval_method import APIRetrievalMethod -from .api_agent_api_key_info import APIAgentAPIKeyInfo -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo -from .api_deployment_visibility import APIDeploymentVisibility -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = [ - "APIAgent", - "APIKey", - "Chatbot", - "ChatbotIdentifier", - "Deployment", - "Function", - "Guardrail", - "Template", - "TemplateGuardrail", -] - - -class APIKey(BaseModel): - api_key: Optional[str] = None - - -class Chatbot(BaseModel): - button_background_color: Optional[str] = None - - logo: Optional[str] = None - - name: Optional[str] = None - - primary_color: Optional[str] = None - - secondary_color: Optional[str] = None - - starting_message: Optional[str] = None - - -class ChatbotIdentifier(BaseModel): - agent_chatbot_identifier: Optional[str] = None - - -class Deployment(BaseModel): - created_at: Optional[datetime] = None - - name: Optional[str] = None - - status: Optional[ - Literal[ - "STATUS_UNKNOWN", - "STATUS_WAITING_FOR_DEPLOYMENT", - "STATUS_DEPLOYING", - "STATUS_RUNNING", - "STATUS_FAILED", - "STATUS_WAITING_FOR_UNDEPLOYMENT", - "STATUS_UNDEPLOYING", - "STATUS_UNDEPLOYMENT_FAILED", - "STATUS_DELETED", - ] - ] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - visibility: Optional[APIDeploymentVisibility] = None - - -class Function(BaseModel): - api_key: Optional[str] = None - - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - description: Optional[str] = None - - faas_name: Optional[str] = None - - faas_namespace: Optional[str] = None - - input_schema: Optional[object] = None - - name: Optional[str] = None - - output_schema: Optional[object] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class Guardrail(BaseModel): - agent_uuid: Optional[str] = None - - created_at: Optional[datetime] = None - - default_response: Optional[str] = None - - description: Optional[str] = None - - guardrail_uuid: Optional[str] = None - - is_attached: Optional[bool] = None - - is_default: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - priority: Optional[int] = None - - type: Optional[ - Literal[ - "GUARDRAIL_TYPE_UNKNOWN", - "GUARDRAIL_TYPE_JAILBREAK", - "GUARDRAIL_TYPE_SENSITIVE_DATA", - "GUARDRAIL_TYPE_CONTENT_MODERATION", - ] - ] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class TemplateGuardrail(BaseModel): - priority: Optional[int] = None - - uuid: Optional[str] = None - - -class Template(BaseModel): - created_at: Optional[datetime] = None - - description: Optional[str] = None - - guardrails: Optional[List[TemplateGuardrail]] = None - - instruction: Optional[str] = None - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - long_description: Optional[str] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - short_description: Optional[str] = None - - summary: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template_type: 
Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class APIAgent(BaseModel): - anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None - - api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - - api_keys: Optional[List[APIKey]] = None - - chatbot: Optional[Chatbot] = None - - chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None - - child_agents: Optional[List["APIAgent"]] = None - - created_at: Optional[datetime] = None - - deployment: Optional[Deployment] = None - - description: Optional[str] = None - - functions: Optional[List[Function]] = None - - guardrails: Optional[List[Guardrail]] = None - - if_case: Optional[str] = None - - instruction: Optional[str] = None - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None - - parent_agents: Optional[List["APIAgent"]] = None - - project_id: Optional[str] = None - - provide_citations: Optional[bool] = None - - region: Optional[str] = None - - retrieval_method: Optional[APIRetrievalMethod] = None - - route_created_at: Optional[datetime] = None - - route_created_by: Optional[str] = None - - route_name: Optional[str] = None - - route_uuid: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template: Optional[Template] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py b/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py deleted file mode 100644 index 8dc71564..00000000 --- a/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["APIAgentAPIKeyInfo"] - - -class APIAgentAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - secret_key: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_agreement.py b/src/digitalocean_genai_sdk/types/api_agreement.py deleted file mode 100644 index c4359f1f..00000000 --- a/src/digitalocean_genai_sdk/types/api_agreement.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIAgreement"] - - -class APIAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py b/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py deleted file mode 100644 index e2e04a8a..00000000 --- a/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["APIAnthropicAPIKeyInfo"] - - -class APIAnthropicAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_indexing_job.py b/src/digitalocean_genai_sdk/types/api_indexing_job.py deleted file mode 100644 index f24aac94..00000000 --- a/src/digitalocean_genai_sdk/types/api_indexing_job.py +++ /dev/null @@ -1,43 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["APIIndexingJob"] - - -class APIIndexingJob(BaseModel): - completed_datasources: Optional[int] = None - - created_at: Optional[datetime] = None - - data_source_uuids: Optional[List[str]] = None - - finished_at: Optional[datetime] = None - - knowledge_base_uuid: Optional[str] = None - - phase: Optional[ - Literal[ - "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", - "BATCH_JOB_PHASE_CANCELLED", - ] - ] = None - - started_at: Optional[datetime] = None - - tokens: Optional[int] = None - - total_datasources: Optional[int] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_key_list_params.py b/src/digitalocean_genai_sdk/types/api_key_list_params.py deleted file mode 100644 index a1ab60dc..00000000 --- a/src/digitalocean_genai_sdk/types/api_key_list_params.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" - - public_only: bool - """only include models that are publicly available.""" - - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - """include only models defined for the listed usecases. 
- - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model may be used in an agent - - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails - - MODEL_USECASE_REASONING: The model may be used for reasoning - - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference - """ diff --git a/src/digitalocean_genai_sdk/types/api_key_list_response.py b/src/digitalocean_genai_sdk/types/api_key_list_response.py deleted file mode 100644 index 360de7a4..00000000 --- a/src/digitalocean_genai_sdk/types/api_key_list_response.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_agreement import APIAgreement -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_model_version import APIModelVersion - -__all__ = ["APIKeyListResponse", "Model"] - - -class Model(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None - - -class APIKeyListResponse(BaseModel): - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - models: Optional[List[Model]] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/__init__.py b/src/digitalocean_genai_sdk/types/api_keys/__init__.py deleted file mode 100644 index c3cbcd6d..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py deleted file mode 100644 index 16cc23c9..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyCreateParams"] - - -class APIKeyCreateParams(TypedDict, total=False): - name: str diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py deleted file mode 100644 index 654e9f1e..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyCreateResponse"] - - -class APIKeyCreateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py deleted file mode 100644 index 4d81d047..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyDeleteResponse"] - - -class APIKeyDeleteResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py deleted file mode 100644 index 535e2f96..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py deleted file mode 100644 index 23c1c0b9..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyUpdateParams"] - - -class APIKeyUpdateParams(TypedDict, total=False): - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py deleted file mode 100644 index 44a316dc..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyUpdateRegenerateResponse"] - - -class APIKeyUpdateRegenerateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py deleted file mode 100644 index 3671addf..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyUpdateResponse"] - - -class APIKeyUpdateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py b/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py deleted file mode 100644 index bf354a47..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel - -__all__ = ["APIModelAPIKeyInfo"] - - -class APIModelAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - secret_key: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_knowledge_base.py b/src/digitalocean_genai_sdk/types/api_knowledge_base.py deleted file mode 100644 index 5b4b6e2c..00000000 --- a/src/digitalocean_genai_sdk/types/api_knowledge_base.py +++ /dev/null @@ -1,37 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["APIKnowledgeBase"] - - -class APIKnowledgeBase(BaseModel): - added_to_agent_at: Optional[datetime] = None - - created_at: Optional[datetime] = None - - database_id: Optional[str] = None - - embedding_model_uuid: Optional[str] = None - - is_public: Optional[bool] = None - - last_indexing_job: Optional[APIIndexingJob] = None - - name: Optional[str] = None - - project_id: Optional[str] = None - - region: Optional[str] = None - - tags: Optional[List[str]] = None - - updated_at: Optional[datetime] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_model.py b/src/digitalocean_genai_sdk/types/api_model.py deleted file mode 100644 index d680a638..00000000 --- a/src/digitalocean_genai_sdk/types/api_model.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_agreement import APIAgreement -from .api_model_version import APIModelVersion - -__all__ = ["APIModel"] - - -class APIModel(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None diff --git a/src/digitalocean_genai_sdk/types/api_model_version.py b/src/digitalocean_genai_sdk/types/api_model_version.py deleted file mode 100644 index 2e118632..00000000 --- a/src/digitalocean_genai_sdk/types/api_model_version.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIModelVersion"] - - -class APIModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None diff --git a/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py b/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py deleted file mode 100644 index 39328f80..00000000 --- a/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_model import APIModel - -__all__ = ["APIOpenAIAPIKeyInfo"] - - -class APIOpenAIAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - models: Optional[List[APIModel]] = None - - name: Optional[str] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/auth/agents/__init__.py b/src/digitalocean_genai_sdk/types/auth/agents/__init__.py deleted file mode 100644 index 9fae55b6..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .token_create_params import TokenCreateParams as TokenCreateParams -from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py deleted file mode 100644 index 0df640f9..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["TokenCreateParams"] - - -class TokenCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py deleted file mode 100644 index e58b7399..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel - -__all__ = ["TokenCreateResponse"] - - -class TokenCreateResponse(BaseModel): - access_token: Optional[str] = None - - refresh_token: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_create_params.py b/src/digitalocean_genai_sdk/types/indexing_job_create_params.py deleted file mode 100644 index 04838472..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_create_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -__all__ = ["IndexingJobCreateParams"] - - -class IndexingJobCreateParams(TypedDict, total=False): - data_source_uuids: List[str] - - knowledge_base_uuid: str diff --git a/src/digitalocean_genai_sdk/types/indexing_job_create_response.py b/src/digitalocean_genai_sdk/types/indexing_job_create_response.py deleted file mode 100644 index 839bc83b..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobCreateResponse"] - - -class IndexingJobCreateResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_list_params.py b/src/digitalocean_genai_sdk/types/indexing_job_list_params.py deleted file mode 100644 index 90206aba..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["IndexingJobListParams"] - - -class IndexingJobListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/indexing_job_list_response.py b/src/digitalocean_genai_sdk/types/indexing_job_list_response.py deleted file mode 100644 index 1379cc55..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobListResponse"] - - -class IndexingJobListResponse(BaseModel): - jobs: Optional[List[APIIndexingJob]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py deleted file mode 100644 index b178b984..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py +++ /dev/null @@ -1,52 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] - - -class IndexedDataSource(BaseModel): - completed_at: Optional[datetime] = None - - data_source_uuid: Optional[str] = None - - error_details: Optional[str] = None - - error_msg: Optional[str] = None - - failed_item_count: Optional[str] = None - - indexed_file_count: Optional[str] = None - - indexed_item_count: Optional[str] = None - - removed_item_count: Optional[str] = None - - skipped_item_count: Optional[str] = None - - started_at: Optional[datetime] = None - - status: Optional[ - Literal[ - "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", - "DATA_SOURCE_STATUS_FAILED", - ] - ] = None - - total_bytes: Optional[str] = None - - total_bytes_indexed: Optional[str] = None - - total_file_count: Optional[str] = None - - -class IndexingJobRetrieveDataSourcesResponse(BaseModel): - indexed_data_sources: Optional[List[IndexedDataSource]] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py deleted file mode 100644 index 95f33d7a..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobRetrieveResponse"] - - -class IndexingJobRetrieveResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py deleted file mode 100644 index 4c2848b0..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["IndexingJobUpdateCancelParams"] - - -class IndexingJobUpdateCancelParams(TypedDict, total=False): - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """A unique identifier for an indexing job.""" diff --git a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py deleted file mode 100644 index d50e1865..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobUpdateCancelResponse"] - - -class IndexingJobUpdateCancelResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py deleted file mode 100644 index 3a58166b..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py +++ /dev/null @@ -1,64 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable -from typing_extensions import TypedDict - -from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam -from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam -from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["KnowledgeBaseCreateParams", "Datasource"] - - -class KnowledgeBaseCreateParams(TypedDict, total=False): - database_id: str - """ - Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - """ - - datasources: Iterable[Datasource] - """The data sources to use for this knowledge base. - - See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data source best practices. - """ - - embedding_model_uuid: str - """ - Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). - """ - - name: str - """Name of the knowledge base.""" - - project_id: str - """Identifier of the DigitalOcean project this knowledge base will belong to.""" - - region: str - """The datacenter region to deploy the knowledge base in.""" - - tags: List[str] - """Tags to organize your knowledge base.""" - - vpc_uuid: str - - -class Datasource(TypedDict, total=False): - bucket_name: str - - bucket_region: str - - file_upload_data_source: APIFileUploadDataSourceParam - """File to upload as data source for knowledge base.""" - - item_path: str - - spaces_data_source: APISpacesDataSourceParam - - web_crawler_data_source: APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py deleted file mode 100644 index cc2d8b9f..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseCreateResponse"] - - -class KnowledgeBaseCreateResponse(BaseModel): - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py deleted file mode 100644 index 6401e25a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from typing import Optional - -from .._models import BaseModel - -__all__ = ["KnowledgeBaseDeleteResponse"] - - -class KnowledgeBaseDeleteResponse(BaseModel): - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py deleted file mode 100644 index dcf9a0ec..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KnowledgeBaseListParams"] - - -class KnowledgeBaseListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py deleted file mode 100644 index 09ca1ad3..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseListResponse"] - - -class KnowledgeBaseListResponse(BaseModel): - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py deleted file mode 100644 index 5a3b5f2c..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseRetrieveResponse"] - - -class KnowledgeBaseRetrieveResponse(BaseModel): - database_status: Optional[ - Literal[ - "CREATING", - "ONLINE", - "POWEROFF", - "REBUILDING", - "REBALANCING", - "DECOMMISSIONED", - "FORKING", - "MIGRATING", - "RESIZING", - "RESTORING", - "POWERING_ON", - "UNHEALTHY", - ] - ] = None - - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py deleted file mode 100644 index 297c79de..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["KnowledgeBaseUpdateParams"] - - -class KnowledgeBaseUpdateParams(TypedDict, total=False): - database_id: str - """the id of the DigitalOcean database this knowledge base will use, optional.""" - - embedding_model_uuid: str - """Identifier for the foundation model.""" - - name: str - - project_id: str - - tags: List[str] - """Tags to organize your knowledge base.""" - - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py deleted file mode 100644 index f3ba2c32..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseUpdateResponse"] - - -class KnowledgeBaseUpdateResponse(BaseModel): - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py deleted file mode 100644 index f5f31034..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource -from .data_source_list_params import DataSourceListParams as DataSourceListParams -from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams -from .data_source_list_response import DataSourceListResponse as DataSourceListResponse -from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource -from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource -from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse -from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse -from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource -from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam -from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py deleted file mode 100644 index 1dcc9639..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APIFileUploadDataSource"] - - -class APIFileUploadDataSource(BaseModel): - original_file_name: Optional[str] = None - - size_in_bytes: Optional[str] = None - - stored_object_key: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py deleted file mode 100644 index 37221059..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIFileUploadDataSourceParam"] - - -class APIFileUploadDataSourceParam(TypedDict, total=False): - original_file_name: str - - size_in_bytes: str - - stored_object_key: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py deleted file mode 100644 index df1cd3bb..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel -from ..api_indexing_job import APIIndexingJob -from .api_spaces_data_source import APISpacesDataSource -from .api_file_upload_data_source import APIFileUploadDataSource -from .api_web_crawler_data_source import APIWebCrawlerDataSource - -__all__ = ["APIKnowledgeBaseDataSource"] - - -class APIKnowledgeBaseDataSource(BaseModel): - bucket_name: Optional[str] = None - - created_at: Optional[datetime] = None - - file_upload_data_source: Optional[APIFileUploadDataSource] = None - """File to upload as data source for knowledge base.""" - - item_path: Optional[str] = None - - last_indexing_job: Optional[APIIndexingJob] = None - - region: Optional[str] = None - - spaces_data_source: Optional[APISpacesDataSource] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py deleted file mode 100644 index f3a0421a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APISpacesDataSource"] - - -class APISpacesDataSource(BaseModel): - bucket_name: Optional[str] = None - - item_path: Optional[str] = None - - region: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py deleted file mode 100644 index b7f2f657..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APISpacesDataSourceParam"] - - -class APISpacesDataSourceParam(TypedDict, total=False): - bucket_name: str - - item_path: str - - region: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py deleted file mode 100644 index 4690c607..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["APIWebCrawlerDataSource"] - - -class APIWebCrawlerDataSource(BaseModel): - base_url: Optional[str] = None - """The base url to crawl.""" - - crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None - """Options for specifying how URLs found on pages should be handled. - - - UNKNOWN: Default unknown value - - SCOPED: Only include the base URL. - - PATH: Crawl the base URL and linked pages within the URL path. - - DOMAIN: Crawl the base URL and linked pages within the same domain. - - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. - """ - - embed_media: Optional[bool] = None - """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py deleted file mode 100644 index 2345ed3a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["APIWebCrawlerDataSourceParam"] - - -class APIWebCrawlerDataSourceParam(TypedDict, total=False): - base_url: str - """The base url to crawl.""" - - crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"] - """Options for specifying how URLs found on pages should be handled. - - - UNKNOWN: Default unknown value - - SCOPED: Only include the base URL. - - PATH: Crawl the base URL and linked pages within the URL path. - - DOMAIN: Crawl the base URL and linked pages within the same domain. - - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. - """ - - embed_media: bool - """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py deleted file mode 100644 index b1abafdf..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo -from .api_spaces_data_source_param import APISpacesDataSourceParam -from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["DataSourceCreateParams", "AwsDataSource"] - - -class DataSourceCreateParams(TypedDict, total=False): - aws_data_source: AwsDataSource - - body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] - - spaces_data_source: APISpacesDataSourceParam - - web_crawler_data_source: APIWebCrawlerDataSourceParam - - -class AwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py deleted file mode 100644 index 1035d3f4..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource - -__all__ = ["DataSourceCreateResponse"] - - -class DataSourceCreateResponse(BaseModel): - knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py deleted file mode 100644 index 53954d7f..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["DataSourceDeleteResponse"] - - -class DataSourceDeleteResponse(BaseModel): - data_source_uuid: Optional[str] = None - - knowledge_base_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py deleted file mode 100644 index e3ed5e3c..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DataSourceListParams"] - - -class DataSourceListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py deleted file mode 100644 index 78246ce1..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
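A sketch of attaching a Spaces bucket via the DataSourceCreateParams shape removed above; the nested resource path and keyword names are assumptions inferred from the type module layout, and all values are placeholders:

    ds = client.knowledge_bases.data_sources.create(
        knowledge_base_uuid="kb-uuid",  # hypothetical identifier
        spaces_data_source={
            "bucket_name": "my-bucket",
            "item_path": "docs/",
            "region": "tor1",
        },
    )
    print(ds.knowledge_base_data_source)  # Optional[APIKnowledgeBaseDataSource]
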
- -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource - -__all__ = ["DataSourceListResponse"] - - -class DataSourceListResponse(BaseModel): - knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py deleted file mode 100644 index eb47e709..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams -from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py deleted file mode 100644 index a032810c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py deleted file mode 100644 index 2afe2dda..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
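The provider key types deleted here follow a plain CRUD shape. A sketch for the Anthropic variant; the resource path is assumed from the type module path (types/providers/anthropic), and the key value is a placeholder:

    key = client.providers.anthropic.keys.create(
        api_key="sk-ant-placeholder",  # illustrative, not a real key
        name="team-key",
    )
    print(key.api_key_info)  # Optional[APIAnthropicAPIKeyInfo]

The OpenAI variant removed below mirrors this exactly, differing only in the info model (APIOpenAIAPIKeyInfo) and in exposing retrieve_agents rather than list_agents.
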
- -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py deleted file mode 100644 index ebbc3b7e..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListAgentsParams"] - - -class KeyListAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py deleted file mode 100644 index ba6ca946..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyListAgentsResponse"] - - -class KeyListAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py deleted file mode 100644 index d0b84e96..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py deleted file mode 100644 index b8361fc2..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py deleted file mode 100644 index b04277a6..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py b/src/digitalocean_genai_sdk/types/providers/openai/__init__.py deleted file mode 100644 index 70abf332..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams -from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py deleted file mode 100644 index f3b4d36c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py deleted file mode 100644 index 0c8922bb..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py deleted file mode 100644 index c263cba3..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py deleted file mode 100644 index ec745d14..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyRetrieveAgentsParams"] - - -class KeyRetrieveAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py deleted file mode 100644 index f42edea6..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyRetrieveAgentsResponse"] - - -class KeyRetrieveAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py deleted file mode 100644 index 7015b6f7..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py deleted file mode 100644 index 4889f994..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/region_list_params.py b/src/digitalocean_genai_sdk/types/region_list_params.py deleted file mode 100644 index 1db0ad50..00000000 --- a/src/digitalocean_genai_sdk/types/region_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["RegionListParams"] - - -class RegionListParams(TypedDict, total=False): - serves_batch: bool - """include datacenters that are capable of running batch jobs.""" - - serves_inference: bool - """include datacenters that serve inference.""" diff --git a/src/digitalocean_genai_sdk/types/region_list_response.py b/src/digitalocean_genai_sdk/types/region_list_response.py deleted file mode 100644 index 0f955b36..00000000 --- a/src/digitalocean_genai_sdk/types/region_list_response.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
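A sketch of the region listing being removed, whose RegionListResponse model follows just below; the `list` method name is assumed from the params type:

    regions = client.regions.list(serves_inference=True)
    for region in regions.regions or []:
        print(region.region, region.inference_url)
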
- -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["RegionListResponse", "Region"] - - -class Region(BaseModel): - inference_url: Optional[str] = None - - region: Optional[str] = None - - serves_batch: Optional[bool] = None - - serves_inference: Optional[bool] = None - - stream_inference_url: Optional[str] = None - - -class RegionListResponse(BaseModel): - regions: Optional[List[Region]] = None diff --git a/src/digitalocean_genai_sdk/__init__.py b/src/gradientai/__init__.py similarity index 87% rename from src/digitalocean_genai_sdk/__init__.py rename to src/gradientai/__init__.py index fc240d83..e0f0a60b 100644 --- a/src/digitalocean_genai_sdk/__init__.py +++ b/src/gradientai/__init__.py @@ -10,11 +10,11 @@ Stream, Timeout, Transport, + GradientAI, AsyncClient, AsyncStream, RequestOptions, - DigitaloceanGenaiSDK, - AsyncDigitaloceanGenaiSDK, + AsyncGradientAI, ) from ._models import BaseModel from ._version import __title__, __version__ @@ -28,12 +28,12 @@ RateLimitError, APITimeoutError, BadRequestError, + GradientAIError, APIConnectionError, AuthenticationError, InternalServerError, PermissionDeniedError, UnprocessableEntityError, - DigitaloceanGenaiSDKError, APIResponseValidationError, ) from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient @@ -49,7 +49,7 @@ "NotGiven", "NOT_GIVEN", "Omit", - "DigitaloceanGenaiSDKError", + "GradientAIError", "APIError", "APIStatusError", "APITimeoutError", @@ -69,8 +69,8 @@ "AsyncClient", "Stream", "AsyncStream", - "DigitaloceanGenaiSDK", - "AsyncDigitaloceanGenaiSDK", + "GradientAI", + "AsyncGradientAI", "file_from_path", "BaseModel", "DEFAULT_TIMEOUT", @@ -88,12 +88,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# digitalocean_genai_sdk._exceptions.NotFoundError -> digitalocean_genai_sdk.NotFoundError +# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "digitalocean_genai_sdk" + __locals[__name].__module__ = "gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/gradientai/_base_client.py similarity index 99% rename from src/digitalocean_genai_sdk/_base_client.py rename to src/gradientai/_base_client.py index 6fd247cc..aa3b35f1 100644 --- a/src/digitalocean_genai_sdk/_base_client.py +++ b/src/gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `digitalocean_genai_sdk.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/digitalocean_genai_sdk/_client.py b/src/gradientai/_client.py similarity index 63% rename from src/digitalocean_genai_sdk/_client.py rename to src/gradientai/_client.py index 2f86bb7d..b22056ad 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/gradientai/_client.py @@ -23,7 +23,7 @@ from ._compat import cached_property from ._version import __version__ from ._streaming import Stream as Stream, AsyncStream as AsyncStream -from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError +from ._exceptions import APIStatusError, GradientAIError from ._base_client import ( DEFAULT_MAX_RETRIES, SyncAPIClient, @@ -31,42 +31,25 @@ ) if TYPE_CHECKING: - from .resources import ( - auth, - chat, - agents, - models, - regions, - api_keys, - providers, - embeddings, - indexing_jobs, - knowledge_bases, - ) + from .resources import chat, agents, models, embeddings from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource - from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.auth.auth import AuthResource, AsyncAuthResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource - from .resources.providers.providers import ProvidersResource, AsyncProvidersResource - from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ "Timeout", "Transport", "ProxiesTypes", "RequestOptions", - "DigitaloceanGenaiSDK", - "AsyncDigitaloceanGenaiSDK", + "GradientAI", + "AsyncGradientAI", "Client", "AsyncClient", ] -class DigitaloceanGenaiSDK(SyncAPIClient): +class GradientAI(SyncAPIClient): # client options api_key: str @@ -93,20 +76,20 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new synchronous DigitaloceanGenaiSDK client instance. + """Construct a new synchronous GradientAI client instance. This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. 
""" if api_key is None: api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") if api_key is None: - raise DigitaloceanGenaiSDKError( + raise GradientAIError( "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" ) self.api_key = api_key if base_url is None: - base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + base_url = os.environ.get("GRADIENT_AI_BASE_URL") if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -127,42 +110,6 @@ def agents(self) -> AgentsResource: return AgentsResource(self) - @cached_property - def providers(self) -> ProvidersResource: - from .resources.providers import ProvidersResource - - return ProvidersResource(self) - - @cached_property - def auth(self) -> AuthResource: - from .resources.auth import AuthResource - - return AuthResource(self) - - @cached_property - def regions(self) -> RegionsResource: - from .resources.regions import RegionsResource - - return RegionsResource(self) - - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - from .resources.indexing_jobs import IndexingJobsResource - - return IndexingJobsResource(self) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - from .resources.knowledge_bases import KnowledgeBasesResource - - return KnowledgeBasesResource(self) - - @cached_property - def api_keys(self) -> APIKeysResource: - from .resources.api_keys import APIKeysResource - - return APIKeysResource(self) - @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -182,12 +129,12 @@ def models(self) -> ModelsResource: return ModelsResource(self) @cached_property - def with_raw_response(self) -> DigitaloceanGenaiSDKWithRawResponse: - return DigitaloceanGenaiSDKWithRawResponse(self) + def with_raw_response(self) -> GradientAIWithRawResponse: + return GradientAIWithRawResponse(self) @cached_property - def with_streaming_response(self) -> DigitaloceanGenaiSDKWithStreamedResponse: - return DigitaloceanGenaiSDKWithStreamedResponse(self) + def with_streaming_response(self) -> GradientAIWithStreamedResponse: + return GradientAIWithStreamedResponse(self) @property @override @@ -294,7 +241,7 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): +class AsyncGradientAI(AsyncAPIClient): # client options api_key: str @@ -321,20 +268,20 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new async AsyncDigitaloceanGenaiSDK client instance. + """Construct a new async AsyncGradientAI client instance. This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. 
""" if api_key is None: api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") if api_key is None: - raise DigitaloceanGenaiSDKError( + raise GradientAIError( "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" ) self.api_key = api_key if base_url is None: - base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + base_url = os.environ.get("GRADIENT_AI_BASE_URL") if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -355,42 +302,6 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) - @cached_property - def providers(self) -> AsyncProvidersResource: - from .resources.providers import AsyncProvidersResource - - return AsyncProvidersResource(self) - - @cached_property - def auth(self) -> AsyncAuthResource: - from .resources.auth import AsyncAuthResource - - return AsyncAuthResource(self) - - @cached_property - def regions(self) -> AsyncRegionsResource: - from .resources.regions import AsyncRegionsResource - - return AsyncRegionsResource(self) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - from .resources.indexing_jobs import AsyncIndexingJobsResource - - return AsyncIndexingJobsResource(self) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - from .resources.knowledge_bases import AsyncKnowledgeBasesResource - - return AsyncKnowledgeBasesResource(self) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - from .resources.api_keys import AsyncAPIKeysResource - - return AsyncAPIKeysResource(self) - @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -410,12 +321,12 @@ def models(self) -> AsyncModelsResource: return AsyncModelsResource(self) @cached_property - def with_raw_response(self) -> AsyncDigitaloceanGenaiSDKWithRawResponse: - return AsyncDigitaloceanGenaiSDKWithRawResponse(self) + def with_raw_response(self) -> AsyncGradientAIWithRawResponse: + return AsyncGradientAIWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncDigitaloceanGenaiSDKWithStreamedResponse: - return AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) + def with_streaming_response(self) -> AsyncGradientAIWithStreamedResponse: + return AsyncGradientAIWithStreamedResponse(self) @property @override @@ -522,10 +433,10 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class DigitaloceanGenaiSDKWithRawResponse: - _client: DigitaloceanGenaiSDK +class GradientAIWithRawResponse: + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client @cached_property @@ -534,42 +445,6 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithRawResponse: - from .resources.providers import ProvidersResourceWithRawResponse - - return ProvidersResourceWithRawResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AuthResourceWithRawResponse: - from .resources.auth import AuthResourceWithRawResponse - - return AuthResourceWithRawResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.RegionsResourceWithRawResponse: - from .resources.regions import RegionsResourceWithRawResponse - - return 
RegionsResourceWithRawResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse - - return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse - - return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - from .resources.api_keys import APIKeysResourceWithRawResponse - - return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -589,10 +464,10 @@ def models(self) -> models.ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._client.models) -class AsyncDigitaloceanGenaiSDKWithRawResponse: - _client: AsyncDigitaloceanGenaiSDK +class AsyncGradientAIWithRawResponse: + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property @@ -601,42 +476,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: - from .resources.providers import AsyncProvidersResourceWithRawResponse - - return AsyncProvidersResourceWithRawResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithRawResponse: - from .resources.auth import AsyncAuthResourceWithRawResponse - - return AsyncAuthResourceWithRawResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: - from .resources.regions import AsyncRegionsResourceWithRawResponse - - return AsyncRegionsResourceWithRawResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse - - return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse - - return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse - - return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -656,10 +495,10 @@ def models(self) -> models.AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._client.models) -class DigitaloceanGenaiSDKWithStreamedResponse: - _client: DigitaloceanGenaiSDK +class GradientAIWithStreamedResponse: + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client @cached_property @@ -668,42 +507,6 @@ 
def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithStreamingResponse: - from .resources.providers import ProvidersResourceWithStreamingResponse - - return ProvidersResourceWithStreamingResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AuthResourceWithStreamingResponse: - from .resources.auth import AuthResourceWithStreamingResponse - - return AuthResourceWithStreamingResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.RegionsResourceWithStreamingResponse: - from .resources.regions import RegionsResourceWithStreamingResponse - - return RegionsResourceWithStreamingResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse - - return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse - - return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - from .resources.api_keys import APIKeysResourceWithStreamingResponse - - return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -723,10 +526,10 @@ def models(self) -> models.ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._client.models) -class AsyncDigitaloceanGenaiSDKWithStreamedResponse: - _client: AsyncDigitaloceanGenaiSDK +class AsyncGradientAIWithStreamedResponse: + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property @@ -735,42 +538,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: - from .resources.providers import AsyncProvidersResourceWithStreamingResponse - - return AsyncProvidersResourceWithStreamingResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: - from .resources.auth import AsyncAuthResourceWithStreamingResponse - - return AsyncAuthResourceWithStreamingResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: - from .resources.regions import AsyncRegionsResourceWithStreamingResponse - - return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse - - return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import 
AsyncKnowledgeBasesResourceWithStreamingResponse - - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse - - return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse @@ -790,6 +557,6 @@ def models(self) -> models.AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._client.models) -Client = DigitaloceanGenaiSDK +Client = GradientAI -AsyncClient = AsyncDigitaloceanGenaiSDK +AsyncClient = AsyncGradientAI diff --git a/src/digitalocean_genai_sdk/_compat.py b/src/gradientai/_compat.py similarity index 100% rename from src/digitalocean_genai_sdk/_compat.py rename to src/gradientai/_compat.py diff --git a/src/digitalocean_genai_sdk/_constants.py b/src/gradientai/_constants.py similarity index 100% rename from src/digitalocean_genai_sdk/_constants.py rename to src/gradientai/_constants.py diff --git a/src/digitalocean_genai_sdk/_exceptions.py b/src/gradientai/_exceptions.py similarity index 97% rename from src/digitalocean_genai_sdk/_exceptions.py rename to src/gradientai/_exceptions.py index 755e166e..759c8d86 100644 --- a/src/digitalocean_genai_sdk/_exceptions.py +++ b/src/gradientai/_exceptions.py @@ -18,11 +18,11 @@ ] -class DigitaloceanGenaiSDKError(Exception): +class GradientAIError(Exception): pass -class APIError(DigitaloceanGenaiSDKError): +class APIError(GradientAIError): message: str request: httpx.Request diff --git a/src/digitalocean_genai_sdk/_files.py b/src/gradientai/_files.py similarity index 100% rename from src/digitalocean_genai_sdk/_files.py rename to src/gradientai/_files.py diff --git a/src/digitalocean_genai_sdk/_models.py b/src/gradientai/_models.py similarity index 100% rename from src/digitalocean_genai_sdk/_models.py rename to src/gradientai/_models.py diff --git a/src/digitalocean_genai_sdk/_qs.py b/src/gradientai/_qs.py similarity index 100% rename from src/digitalocean_genai_sdk/_qs.py rename to src/gradientai/_qs.py diff --git a/src/digitalocean_genai_sdk/_resource.py b/src/gradientai/_resource.py similarity index 76% rename from src/digitalocean_genai_sdk/_resource.py rename to src/gradientai/_resource.py index fe43ec28..9182ee0b 100644 --- a/src/digitalocean_genai_sdk/_resource.py +++ b/src/gradientai/_resource.py @@ -8,13 +8,13 @@ import anyio if TYPE_CHECKING: - from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + from ._client import GradientAI, AsyncGradientAI class SyncAPIResource: - _client: DigitaloceanGenaiSDK + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client self._get = client.get self._post = client.post @@ -28,9 +28,9 @@ def _sleep(self, seconds: float) -> None: class AsyncAPIResource: - _client: AsyncDigitaloceanGenaiSDK + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client self._get = client.get self._post = client.post diff --git a/src/digitalocean_genai_sdk/_response.py b/src/gradientai/_response.py similarity index 98% rename from src/digitalocean_genai_sdk/_response.py rename to src/gradientai/_response.py 
index 7f1fff1d..2037e4ca 100644 --- a/src/digitalocean_genai_sdk/_response.py +++ b/src/gradientai/_response.py @@ -29,7 +29,7 @@ from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type -from ._exceptions import DigitaloceanGenaiSDKError, APIResponseValidationError +from ._exceptions import GradientAIError, APIResponseValidationError if TYPE_CHECKING: from ._models import FinalRequestOptions @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from digitalocean_genai_sdk import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from digitalocean_genai_sdk import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -558,11 +558,11 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `digitalocean_genai_sdk._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference", ) -class StreamAlreadyConsumed(DigitaloceanGenaiSDKError): +class StreamAlreadyConsumed(GradientAIError): """ Attempted to read or stream content, but the content has already been streamed. 
diff --git a/src/digitalocean_genai_sdk/_streaming.py b/src/gradientai/_streaming.py
similarity index 98%
rename from src/digitalocean_genai_sdk/_streaming.py
rename to src/gradientai/_streaming.py
index 96c3f3d3..bab5eb80 100644
--- a/src/digitalocean_genai_sdk/_streaming.py
+++ b/src/gradientai/_streaming.py
@@ -12,7 +12,7 @@
 from ._utils import extract_type_var_from_base
 
 if TYPE_CHECKING:
-    from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
+    from ._client import GradientAI, AsyncGradientAI
 
 
 _T = TypeVar("_T")
@@ -30,7 +30,7 @@ def __init__(
         *,
         cast_to: type[_T],
         response: httpx.Response,
-        client: DigitaloceanGenaiSDK,
+        client: GradientAI,
     ) -> None:
         self.response = response
         self._cast_to = cast_to
@@ -93,7 +93,7 @@ def __init__(
         *,
         cast_to: type[_T],
         response: httpx.Response,
-        client: AsyncDigitaloceanGenaiSDK,
+        client: AsyncGradientAI,
     ) -> None:
         self.response = response
         self._cast_to = cast_to
diff --git a/src/digitalocean_genai_sdk/_types.py b/src/gradientai/_types.py
similarity index 99%
rename from src/digitalocean_genai_sdk/_types.py
rename to src/gradientai/_types.py
index 3c0d156e..1bac876d 100644
--- a/src/digitalocean_genai_sdk/_types.py
+++ b/src/gradientai/_types.py
@@ -81,7 +81,7 @@
 # This unfortunately means that you will either have
 # to import this type and pass it explicitly:
 #
-# from digitalocean_genai_sdk import NoneType
+# from gradientai import NoneType
 # client.get('/foo', cast_to=NoneType)
 #
 # or build it yourself:
diff --git a/src/digitalocean_genai_sdk/_utils/__init__.py b/src/gradientai/_utils/__init__.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/__init__.py
rename to src/gradientai/_utils/__init__.py
diff --git a/src/digitalocean_genai_sdk/_utils/_logs.py b/src/gradientai/_utils/_logs.py
similarity index 67%
rename from src/digitalocean_genai_sdk/_utils/_logs.py
rename to src/gradientai/_utils/_logs.py
index e0c1fee5..9047e5c8 100644
--- a/src/digitalocean_genai_sdk/_utils/_logs.py
+++ b/src/gradientai/_utils/_logs.py
@@ -1,12 +1,12 @@
 import os
 import logging
 
-logger: logging.Logger = logging.getLogger("digitalocean_genai_sdk")
+logger: logging.Logger = logging.getLogger("gradientai")
 httpx_logger: logging.Logger = logging.getLogger("httpx")
 
 
 def _basic_config() -> None:
-    # e.g. [2023-10-05 14:12:26 - digitalocean_genai_sdk._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+    # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
     logging.basicConfig(
         format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
         datefmt="%Y-%m-%d %H:%M:%S",
@@ -14,7 +14,7 @@ def _basic_config() -> None:
 
 
 def setup_logging() -> None:
-    env = os.environ.get("DIGITALOCEAN_GENAI_SDK_LOG")
+    env = os.environ.get("GRADIENT_AI_LOG")
     if env == "debug":
         _basic_config()
         logger.setLevel(logging.DEBUG)
diff --git a/src/digitalocean_genai_sdk/_utils/_proxy.py b/src/gradientai/_utils/_proxy.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_proxy.py
rename to src/gradientai/_utils/_proxy.py
diff --git a/src/digitalocean_genai_sdk/_utils/_reflection.py b/src/gradientai/_utils/_reflection.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_reflection.py
rename to src/gradientai/_utils/_reflection.py
diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/gradientai/_utils/_resources_proxy.py
new file mode 100644
index 00000000..b3bc4931
--- /dev/null
+++ b/src/gradientai/_utils/_resources_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import override
+
+from ._proxy import LazyProxy
+
+
+class ResourcesProxy(LazyProxy[Any]):
+    """A proxy for the `gradientai.resources` module.
+
+    This is used so that we can lazily import `gradientai.resources` only when
+    needed *and* so that users can just import `gradientai` and reference `gradientai.resources`
+    """
+
+    @override
+    def __load__(self) -> Any:
+        import importlib
+
+        mod = importlib.import_module("gradientai.resources")
+        return mod
+
+
+resources = ResourcesProxy().__as_proxied__()
diff --git a/src/digitalocean_genai_sdk/_utils/_streams.py b/src/gradientai/_utils/_streams.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_streams.py
rename to src/gradientai/_utils/_streams.py
diff --git a/src/digitalocean_genai_sdk/_utils/_sync.py b/src/gradientai/_utils/_sync.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_sync.py
rename to src/gradientai/_utils/_sync.py
diff --git a/src/digitalocean_genai_sdk/_utils/_transform.py b/src/gradientai/_utils/_transform.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_transform.py
rename to src/gradientai/_utils/_transform.py
diff --git a/src/digitalocean_genai_sdk/_utils/_typing.py b/src/gradientai/_utils/_typing.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_typing.py
rename to src/gradientai/_utils/_typing.py
diff --git a/src/digitalocean_genai_sdk/_utils/_utils.py b/src/gradientai/_utils/_utils.py
similarity index 100%
rename from src/digitalocean_genai_sdk/_utils/_utils.py
rename to src/gradientai/_utils/_utils.py
diff --git a/src/digitalocean_genai_sdk/_version.py b/src/gradientai/_version.py
similarity index 79%
rename from src/digitalocean_genai_sdk/_version.py
rename to src/gradientai/_version.py
index 50483bc2..2cf47e97 100644
--- a/src/digitalocean_genai_sdk/_version.py
+++ b/src/gradientai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-__title__ = "digitalocean_genai_sdk"
+__title__ = "gradientai"
 __version__ = "0.1.0-alpha.3"  # x-release-please-version
diff --git a/src/gradientai/lib/.keep b/src/gradientai/lib/.keep
new file mode 100644
index 00000000..5e2c99fd
--- /dev/null
+++ b/src/gradientai/lib/.keep
@@ -0,0 +1,4 @@
+File generated from our OpenAPI spec by Stainless.
+
+This directory can be used to store custom files to expand the SDK.
+It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
\ No newline at end of file
diff --git a/src/digitalocean_genai_sdk/py.typed b/src/gradientai/py.typed
similarity index 100%
rename from src/digitalocean_genai_sdk/py.typed
rename to src/gradientai/py.typed
diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py
new file mode 100644
index 00000000..386e2ed6
--- /dev/null
+++ b/src/gradientai/resources/__init__.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import (
+    ChatResource,
+    AsyncChatResource,
+    ChatResourceWithRawResponse,
+    AsyncChatResourceWithRawResponse,
+    ChatResourceWithStreamingResponse,
+    AsyncChatResourceWithStreamingResponse,
+)
+from .agents import (
+    AgentsResource,
+    AsyncAgentsResource,
+    AgentsResourceWithRawResponse,
+    AsyncAgentsResourceWithRawResponse,
+    AgentsResourceWithStreamingResponse,
+    AsyncAgentsResourceWithStreamingResponse,
+)
+from .models import (
+    ModelsResource,
+    AsyncModelsResource,
+    ModelsResourceWithRawResponse,
+    AsyncModelsResourceWithRawResponse,
+    ModelsResourceWithStreamingResponse,
+    AsyncModelsResourceWithStreamingResponse,
+)
+from .embeddings import (
+    EmbeddingsResource,
+    AsyncEmbeddingsResource,
+    EmbeddingsResourceWithRawResponse,
+    AsyncEmbeddingsResourceWithRawResponse,
+    EmbeddingsResourceWithStreamingResponse,
+    AsyncEmbeddingsResourceWithStreamingResponse,
+)
+
+__all__ = [
+    "AgentsResource",
+    "AsyncAgentsResource",
+    "AgentsResourceWithRawResponse",
+    "AsyncAgentsResourceWithRawResponse",
+    "AgentsResourceWithStreamingResponse",
+    "AsyncAgentsResourceWithStreamingResponse",
+    "ChatResource",
+    "AsyncChatResource",
+    "ChatResourceWithRawResponse",
+    "AsyncChatResourceWithRawResponse",
+    "ChatResourceWithStreamingResponse",
+    "AsyncChatResourceWithStreamingResponse",
+    "EmbeddingsResource",
+    "AsyncEmbeddingsResource",
+    "EmbeddingsResourceWithRawResponse",
+    "AsyncEmbeddingsResourceWithRawResponse",
+    "EmbeddingsResourceWithStreamingResponse",
+    "AsyncEmbeddingsResourceWithStreamingResponse",
+    "ModelsResource",
+    "AsyncModelsResource",
+    "ModelsResourceWithRawResponse",
+    "AsyncModelsResourceWithRawResponse",
+    "ModelsResourceWithStreamingResponse",
+    "AsyncModelsResourceWithStreamingResponse",
+]
diff --git a/src/digitalocean_genai_sdk/resources/auth/__init__.py b/src/gradientai/resources/agents/__init__.py
similarity index 53%
rename from src/digitalocean_genai_sdk/resources/auth/__init__.py
rename to src/gradientai/resources/agents/__init__.py
index 7c844a98..2ae2658b 100644
--- a/src/digitalocean_genai_sdk/resources/auth/__init__.py
+++ b/src/gradientai/resources/agents/__init__.py
@@ -1,13 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -16,18 +8,26 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) __all__ = [ + "VersionsResource", + "AsyncVersionsResource", + "VersionsResourceWithRawResponse", + "AsyncVersionsResourceWithRawResponse", + "VersionsResourceWithStreamingResponse", + "AsyncVersionsResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", ] diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py b/src/gradientai/resources/agents/agents.py similarity index 64% rename from src/digitalocean_genai_sdk/resources/auth/agents/agents.py rename to src/gradientai/resources/agents/agents.py index a0aa9faf..9896d179 100644 --- a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -2,24 +2,24 @@ from __future__ import annotations -from .token import ( - TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource __all__ = ["AgentsResource", "AsyncAgentsResource"] class AgentsResource(SyncAPIResource): @cached_property - def token(self) -> TokenResource: - return TokenResource(self._client) + def versions(self) -> VersionsResource: + return VersionsResource(self._client) @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: @@ -27,7 +27,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -36,15 +36,15 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) class AsyncAgentsResource(AsyncAPIResource): @cached_property - def token(self) -> AsyncTokenResource: - return AsyncTokenResource(self._client) + def versions(self) -> AsyncVersionsResource: + return AsyncVersionsResource(self._client) @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) @@ -71,8 +71,8 @@ def __init__(self, agents: AgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> TokenResourceWithRawResponse: - return TokenResourceWithRawResponse(self._agents.token) + def versions(self) -> VersionsResourceWithRawResponse: + return VersionsResourceWithRawResponse(self._agents.versions) class AsyncAgentsResourceWithRawResponse: @@ -80,8 +80,8 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> AsyncTokenResourceWithRawResponse: - return AsyncTokenResourceWithRawResponse(self._agents.token) + def versions(self) -> AsyncVersionsResourceWithRawResponse: + return AsyncVersionsResourceWithRawResponse(self._agents.versions) class AgentsResourceWithStreamingResponse: @@ -89,8 +89,8 @@ def __init__(self, agents: AgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> TokenResourceWithStreamingResponse: - return TokenResourceWithStreamingResponse(self._agents.token) + def versions(self) -> VersionsResourceWithStreamingResponse: + return VersionsResourceWithStreamingResponse(self._agents.versions) class AsyncAgentsResourceWithStreamingResponse: @@ -98,5 +98,5 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> AsyncTokenResourceWithStreamingResponse: - return AsyncTokenResourceWithStreamingResponse(self._agents.token) + def versions(self) -> AsyncVersionsResourceWithStreamingResponse: + return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/digitalocean_genai_sdk/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/agents/versions.py rename to src/gradientai/resources/agents/versions.py index e77a252b..d71da8df 100644 --- a/src/digitalocean_genai_sdk/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def 
with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/chat.py b/src/gradientai/resources/chat.py similarity index 98% rename from src/digitalocean_genai_sdk/resources/chat.py rename to src/gradientai/resources/chat.py index 518fbad8..223e7cf3 100644 --- a/src/digitalocean_genai_sdk/resources/chat.py +++ b/src/gradientai/resources/chat.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -191,7 +191,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -200,7 +200,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/gradientai/resources/embeddings.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/embeddings.py rename to src/gradientai/resources/embeddings.py index 1bcd3145..36ffe3c6 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/gradientai/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/gradientai/resources/models.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/models.py rename to src/gradientai/resources/models.py index 81b75441..c30e1135 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/gradientai/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py new file mode 100644 index 00000000..7b80eca4 --- /dev/null +++ b/src/gradientai/types/__init__.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .model import Model as Model +from .model_list_response import ModelListResponse as ModelListResponse +from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility +from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams +from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from .chat_completion_request_message_content_part_text_param import ( + ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, +) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py new file mode 100644 index 00000000..fdee8834 --- /dev/null +++ b/src/gradientai/types/agents/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .api_meta import APIMeta as APIMeta +from .api_links import APILinks as APILinks +from .version_list_params import VersionListParams as VersionListParams +from .version_list_response import VersionListResponse as VersionListResponse +from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/digitalocean_genai_sdk/types/agents/api_links.py b/src/gradientai/types/agents/api_links.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/api_links.py rename to src/gradientai/types/agents/api_links.py diff --git a/src/digitalocean_genai_sdk/types/agents/api_meta.py b/src/gradientai/types/agents/api_meta.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/api_meta.py rename to src/gradientai/types/agents/api_meta.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_list_params.py rename to src/gradientai/types/agents/version_list_params.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_list_response.py rename to src/gradientai/types/agents/version_list_response.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_update_params.py rename to src/gradientai/types/agents/version_update_params.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_update_response.py rename to src/gradientai/types/agents/version_update_response.py diff --git a/src/digitalocean_genai_sdk/types/api_deployment_visibility.py b/src/gradientai/types/api_deployment_visibility.py similarity index 100% rename 
from src/digitalocean_genai_sdk/types/api_deployment_visibility.py rename to src/gradientai/types/api_deployment_visibility.py diff --git a/src/digitalocean_genai_sdk/types/auth/__init__.py b/src/gradientai/types/api_keys/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/auth/__init__.py rename to src/gradientai/types/api_keys/__init__.py diff --git a/src/digitalocean_genai_sdk/types/api_retrieval_method.py b/src/gradientai/types/api_retrieval_method.py similarity index 100% rename from src/digitalocean_genai_sdk/types/api_retrieval_method.py rename to src/gradientai/types/api_retrieval_method.py diff --git a/src/digitalocean_genai_sdk/types/providers/__init__.py b/src/gradientai/types/auth/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/providers/__init__.py rename to src/gradientai/types/auth/__init__.py diff --git a/tests/api_resources/api_keys/__init__.py b/src/gradientai/types/auth/agents/__init__.py similarity index 70% rename from tests/api_resources/api_keys/__init__.py rename to src/gradientai/types/auth/agents/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/api_keys/__init__.py +++ b/src/gradientai/types/auth/agents/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py b/src/gradientai/types/chat_completion_request_message_content_part_text_param.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py rename to src/gradientai/types/chat_completion_request_message_content_part_text_param.py diff --git a/src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py b/src/gradientai/types/chat_completion_token_logprob.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py rename to src/gradientai/types/chat_completion_token_logprob.py diff --git a/src/digitalocean_genai_sdk/types/chat_create_completion_params.py b/src/gradientai/types/chat_create_completion_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_create_completion_params.py rename to src/gradientai/types/chat_create_completion_params.py diff --git a/src/digitalocean_genai_sdk/types/chat_create_completion_response.py b/src/gradientai/types/chat_create_completion_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_create_completion_response.py rename to src/gradientai/types/chat_create_completion_response.py diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/gradientai/types/embedding_create_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/embedding_create_params.py rename to src/gradientai/types/embedding_create_params.py diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/gradientai/types/embedding_create_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/embedding_create_response.py rename to src/gradientai/types/embedding_create_response.py diff --git a/tests/api_resources/auth/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py similarity index 70% rename from tests/api_resources/auth/__init__.py rename to src/gradientai/types/knowledge_bases/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/auth/__init__.py +++ 
b/src/gradientai/types/knowledge_bases/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/model.py b/src/gradientai/types/model.py similarity index 100% rename from src/digitalocean_genai_sdk/types/model.py rename to src/gradientai/types/model.py diff --git a/src/digitalocean_genai_sdk/types/model_list_response.py b/src/gradientai/types/model_list_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/model_list_response.py rename to src/gradientai/types/model_list_response.py diff --git a/tests/api_resources/auth/agents/__init__.py b/src/gradientai/types/providers/__init__.py similarity index 70% rename from tests/api_resources/auth/agents/__init__.py rename to src/gradientai/types/providers/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/auth/agents/__init__.py +++ b/src/gradientai/types/providers/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/knowledge_bases/__init__.py b/src/gradientai/types/providers/anthropic/__init__.py similarity index 70% rename from tests/api_resources/knowledge_bases/__init__.py rename to src/gradientai/types/providers/anthropic/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/knowledge_bases/__init__.py +++ b/src/gradientai/types/providers/anthropic/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/gradientai/types/providers/openai/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/gradientai/types/providers/openai/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py deleted file mode 100644 index 911ac6f9..00000000 --- a/tests/api_resources/agents/test_api_keys.py +++ /dev/null @@ -1,572 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.list( - agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.create( - 
path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await 
async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert 
cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="", - agent_uuid="agent_uuid", - ) diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py deleted file mode 100644 index cfc8084e..00000000 --- a/tests/api_resources/agents/test_child_agents.py +++ /dev/null @@ -1,485 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.agents import (
-    ChildAgentAddResponse,
-    ChildAgentViewResponse,
-    ChildAgentDeleteResponse,
-    ChildAgentUpdateResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestChildAgents:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-            body_child_agent_uuid="child_agent_uuid",
-            if_case="if_case",
-            body_parent_agent_uuid="parent_agent_uuid",
-            route_name="route_name",
-            uuid="uuid",
-        )
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.child_agents.with_raw_response.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = response.parse()
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.child_agents.with_streaming_response.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = response.parse()
-            assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
-            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
-        ):
-            client.agents.child_agents.with_raw_response.update(
-                path_child_agent_uuid="child_agent_uuid",
-                path_parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
-            client.agents.child_agents.with_raw_response.update(
-                path_child_agent_uuid="",
-                path_parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.child_agents.with_raw_response.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = response.parse()
-        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.child_agents.with_streaming_response.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = response.parse()
-            assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
-            client.agents.child_agents.with_raw_response.delete(
-                child_agent_uuid="child_agent_uuid",
-                parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
-            client.agents.child_agents.with_raw_response.delete(
-                child_agent_uuid="",
-                parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_add(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_add_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-            body_child_agent_uuid="child_agent_uuid",
-            if_case="if_case",
-            body_parent_agent_uuid="parent_agent_uuid",
-            route_name="route_name",
-        )
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_add(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.child_agents.with_raw_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = response.parse()
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.child_agents.with_streaming_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = response.parse()
-            assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
-            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
-        ):
-            client.agents.child_agents.with_raw_response.add(
-                path_child_agent_uuid="child_agent_uuid",
-                path_parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
-            client.agents.child_agents.with_raw_response.add(
-                path_child_agent_uuid="",
-                path_parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_view(self, client: DigitaloceanGenaiSDK) -> None:
-        child_agent = client.agents.child_agents.view(
-            "uuid",
-        )
-        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_view(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.child_agents.with_raw_response.view(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = response.parse()
-        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_view(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.child_agents.with_streaming_response.view(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = response.parse()
-            assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_view(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.agents.child_agents.with_raw_response.view(
-                "",
-            )
-
-
-class TestAsyncChildAgents:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-            body_child_agent_uuid="child_agent_uuid",
-            if_case="if_case",
-            body_parent_agent_uuid="parent_agent_uuid",
-            route_name="route_name",
-            uuid="uuid",
-        )
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = await response.parse()
-        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = await response.parse()
-            assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
-            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
-        ):
-            await async_client.agents.child_agents.with_raw_response.update(
-                path_child_agent_uuid="child_agent_uuid",
-                path_parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.update(
-                path_child_agent_uuid="",
-                path_parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = await response.parse()
-        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = await response.parse()
-            assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.delete(
-                child_agent_uuid="child_agent_uuid",
-                parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.delete(
-                child_agent_uuid="",
-                parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_add_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-            body_child_agent_uuid="child_agent_uuid",
-            if_case="if_case",
-            body_parent_agent_uuid="parent_agent_uuid",
-            route_name="route_name",
-        )
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = await response.parse()
-        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = await response.parse()
-            assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
-            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
-        ):
-            await async_client.agents.child_agents.with_raw_response.add(
-                path_child_agent_uuid="child_agent_uuid",
-                path_parent_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.add(
-                path_child_agent_uuid="",
-                path_parent_agent_uuid="parent_agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        child_agent = await async_client.agents.child_agents.view(
-            "uuid",
-        )
-        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.view(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        child_agent = await response.parse()
-        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.view(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            child_agent = await response.parse()
-            assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.view(
-                "",
-            )
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
deleted file mode 100644
index d66590ba..00000000
--- a/tests/api_resources/agents/test_functions.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.agents import (
-    FunctionCreateResponse,
-    FunctionDeleteResponse,
-    FunctionUpdateResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestFunctions:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        function = client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        function = client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
-            input_schema={},
-            output_schema={},
-        )
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.functions.with_raw_response.create(
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = response.parse()
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.functions.with_streaming_response.create(
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = response.parse()
-            assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.functions.with_raw_response.create(
-                path_agent_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        function = client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        function = client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
-            body_function_uuid="function_uuid",
-            input_schema={},
-            output_schema={},
-        )
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.functions.with_raw_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = response.parse()
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.functions.with_streaming_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = response.parse()
-            assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.functions.with_raw_response.update(
-                path_function_uuid="function_uuid",
-                path_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"):
-            client.agents.functions.with_raw_response.update(
-                path_function_uuid="",
-                path_agent_uuid="agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        function = client.agents.functions.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.functions.with_raw_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = response.parse()
-        assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.functions.with_streaming_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = response.parse()
-            assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.functions.with_raw_response.delete(
-                function_uuid="function_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"):
-            client.agents.functions.with_raw_response.delete(
-                function_uuid="",
-                agent_uuid="agent_uuid",
-            )
-
-
-class TestAsyncFunctions:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        function = await async_client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        function = await async_client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
-            input_schema={},
-            output_schema={},
-        )
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.functions.with_raw_response.create(
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = await response.parse()
-        assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.functions.with_streaming_response.create(
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = await response.parse()
-            assert_matches_type(FunctionCreateResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.agents.functions.with_raw_response.create(
-                path_agent_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        function = await async_client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        function = await async_client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
-            body_function_uuid="function_uuid",
-            input_schema={},
-            output_schema={},
-        )
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.functions.with_raw_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = await response.parse()
-        assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.functions.with_streaming_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = await response.parse()
-            assert_matches_type(FunctionUpdateResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.agents.functions.with_raw_response.update(
-                path_function_uuid="function_uuid",
-                path_agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"):
-            await async_client.agents.functions.with_raw_response.update(
-                path_function_uuid="",
-                path_agent_uuid="agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        function = await async_client.agents.functions.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.functions.with_raw_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        function = await response.parse()
-        assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.functions.with_streaming_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            function = await response.parse()
-            assert_matches_type(FunctionDeleteResponse, function, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.agents.functions.with_raw_response.delete(
-                function_uuid="function_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"):
-            await async_client.agents.functions.with_raw_response.delete(
-                function_uuid="",
-                agent_uuid="agent_uuid",
-            )
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
deleted file mode 100644
index b313b1af..00000000
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKnowledgeBases:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_attach(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.agents.knowledge_bases.attach(
-            "agent_uuid",
-        )
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_attach(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.knowledge_bases.with_raw_response.attach(
-            "agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_attach(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.knowledge_bases.with_streaming_response.attach(
-            "agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_attach(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.knowledge_bases.with_raw_response.attach(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_attach_single(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.agents.knowledge_bases.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.knowledge_bases.with_raw_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.knowledge_bases.with_streaming_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_attach_single(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="knowledge_base_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_detach(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.agents.knowledge_bases.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_detach(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.knowledge_bases.with_raw_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_detach(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.knowledge_bases.with_streaming_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_detach(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="knowledge_base_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
-            )
-
-
-class TestAsyncKnowledgeBases:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.agents.knowledge_bases.attach(
-            "agent_uuid",
-        )
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.knowledge_bases.with_raw_response.attach(
-            "agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = await response.parse()
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.knowledge_bases.with_streaming_response.attach(
-            "agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = await response.parse()
-            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.agents.knowledge_bases.with_raw_response.attach(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.agents.knowledge_bases.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.knowledge_bases.with_raw_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = await response.parse()
-        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.knowledge_bases.with_streaming_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = await response.parse()
-            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="knowledge_base_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            await async_client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.agents.knowledge_bases.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.knowledge_bases.with_raw_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = await response.parse()
-        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.knowledge_bases.with_streaming_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = await response.parse()
-            assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            await async_client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="knowledge_base_uuid",
-                agent_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            await async_client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
-            )
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
index 94f02d8c..77fee4c6 100644
--- a/tests/api_resources/agents/test_versions.py
+++ b/tests/api_resources/agents/test_versions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.agents import (
+from gradientai.types.agents import (
     VersionListResponse,
     VersionUpdateResponse,
 )
@@ -22,7 +22,7 @@ class TestVersions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_update(self, client: GradientAI) -> None:
         version = client.agents.versions.update(
             path_uuid="uuid",
         )
@@ -30,7 +30,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_update_with_all_params(self, client: GradientAI) -> None:
         version = client.agents.versions.update(
             path_uuid="uuid",
             body_uuid="uuid",
@@ -40,7 +40,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_raw_response_update(self, client: GradientAI) -> None:
         response = client.agents.versions.with_raw_response.update(
             path_uuid="uuid",
         )
@@ -52,7 +52,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_streaming_response_update(self, client: GradientAI) -> None:
         with client.agents.versions.with_streaming_response.update(
             path_uuid="uuid",
         ) as response:
@@ -66,7 +66,7 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_path_params_update(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
             client.agents.versions.with_raw_response.update(
                 path_uuid="",
@@ -74,7 +74,7 @@ def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_list(self, client: GradientAI) -> None:
         version = client.agents.versions.list(
             uuid="uuid",
         )
@@ -82,7 +82,7 @@ def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
         version = client.agents.versions.list(
             uuid="uuid",
             page=0,
@@ -92,7 +92,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_raw_response_list(self, client: GradientAI) -> None:
         response = client.agents.versions.with_raw_response.list(
             uuid="uuid",
         )
@@ -104,7 +104,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_streaming_response_list(self, client: GradientAI) -> None:
         with client.agents.versions.with_streaming_response.list(
             uuid="uuid",
         ) as response:
@@ -118,7 +118,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_path_params_list(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             client.agents.versions.with_raw_response.list(
                 uuid="",
@@ -130,7 +130,7 @@ class TestAsyncVersions:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_update(self, async_client: AsyncGradientAI) -> None:
         version = await async_client.agents.versions.update(
             path_uuid="uuid",
         )
@@ -138,7 +138,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
         version = await async_client.agents.versions.update(
             path_uuid="uuid",
             body_uuid="uuid",
@@ -148,7 +148,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.versions.with_raw_response.update(
             path_uuid="uuid",
         )
@@ -160,7 +160,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK
 
    @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.versions.with_streaming_response.update(
             path_uuid="uuid",
         ) as response:
@@ -174,7 +174,7 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
             await async_client.agents.versions.with_raw_response.update(
                 path_uuid="",
@@ -182,7 +182,7 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
         version = await async_client.agents.versions.list(
             uuid="uuid",
         )
@@ -190,7 +190,7 @@ async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
         version = await async_client.agents.versions.list(
             uuid="uuid",
             page=0,
@@ -200,7 +200,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.versions.with_raw_response.list(
             uuid="uuid",
         )
@@ -212,7 +212,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.versions.with_streaming_response.list(
             uuid="uuid",
         ) as response:
@@ -226,7 +226,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             await async_client.agents.versions.with_raw_response.list(
                 uuid="",
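The `indirect=True` parametrization used throughout these tests routes the `[False, True]` values to a shared client fixture rather than to the test bodies, which is where the "loose"/"strict" ids come from. The following is a minimal sketch of what such a conftest fixture could look like; it is illustrative only, not part of this patch, and the constructor arguments (`api_key`, `_strict_response_validation`) are assumptions about the generated client rather than something the diff confirms.

import os

import pytest

from gradientai import GradientAI  # renamed package, per the diff above

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


@pytest.fixture
def client(request: pytest.FixtureRequest) -> GradientAI:
    # request.param is the False/True value from each test's parametrize list;
    # True would select strict response validation, yielding the "strict" id.
    strict = bool(request.param)
    return GradientAI(  # assumed constructor signature
        base_url=base_url,
        api_key="My API Key",
        _strict_response_validation=strict,
    )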
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.api_keys import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyUpdateRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert 
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            api_key = await response.parse()
-            assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.api_keys.api_keys.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.api_keys.api_keys.update_regenerate(
-            "api_key_uuid",
-        )
-        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        api_key = await response.parse()
-        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            api_key = await response.parse()
-            assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.api_keys.api_keys.with_raw_response.update_regenerate(
-                "",
-            )
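A note on the `path_api_key_uuid` / `body_api_key_uuid` pairs exercised by the update tests above: when the same field name appears in both the URL path and the request body, the generated method disambiguates the two with `path_` and `body_` prefixes. A minimal sketch of such a call, assuming the client constructor accepts `api_key` and `base_url` as in the generated README (all values are placeholders):

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK

    client = DigitaloceanGenaiSDK(api_key="My API Key", base_url="http://127.0.0.1:4010")

    api_key = client.api_keys.api_keys.update(
        path_api_key_uuid="api_key_uuid",  # interpolated into the request URL
        body_api_key_uuid="api_key_uuid",  # serialized into the JSON body
        name="name",
    )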
diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py
deleted file mode 100644
index 1e505ccd..00000000
--- a/tests/api_resources/auth/agents/test_token.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestToken:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        token = client.auth.agents.token.create(
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        token = client.auth.agents.token.create(
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.auth.agents.token.with_raw_response.create(
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        token = response.parse()
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.auth.agents.token.with_streaming_response.create(
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            token = response.parse()
-            assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.auth.agents.token.with_raw_response.create(
-                path_agent_uuid="",
-            )
-
-
-class TestAsyncToken:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        token = await async_client.auth.agents.token.create(
-            path_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        token = await async_client.auth.agents.token.create(
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-        )
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.auth.agents.token.with_raw_response.create(
-            path_agent_uuid="agent_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        token = await response.parse()
-        assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.auth.agents.token.with_streaming_response.create(
-            path_agent_uuid="agent_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            token = await response.parse()
-            assert_matches_type(TokenCreateResponse, token, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            await async_client.auth.agents.token.with_raw_response.create(
-                path_agent_uuid="",
-            )
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
deleted file mode 100644
index 68fd67e5..00000000
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.knowledge_bases import (
-    DataSourceListResponse,
-    DataSourceCreateResponse,
-    DataSourceDeleteResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestDataSources:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.knowledge_bases.data_sources.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.knowledge_bases.data_sources.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-            aws_data_source={
-                "bucket_name": "bucket_name",
-                "item_path": "item_path",
-                "key_id": "key_id",
-                "region": "region",
-                "secret_key": "secret_key",
-            },
-            body_knowledge_base_uuid="knowledge_base_uuid",
-            spaces_data_source={
-                "bucket_name": "bucket_name",
-                "item_path": "item_path",
-                "region": "region",
-            },
-            web_crawler_data_source={
-                "base_url": "base_url",
-                "crawling_option": "UNKNOWN",
-                "embed_media": True,
-            },
-        )
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.data_sources.with_raw_response.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        data_source = response.parse()
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.data_sources.with_streaming_response.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            data_source = response.parse()
-            assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
-            ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
-        ):
-            client.knowledge_bases.data_sources.with_raw_response.create(
-                path_knowledge_base_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.knowledge_bases.data_sources.list(
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.knowledge_bases.data_sources.list(
-            knowledge_base_uuid="knowledge_base_uuid",
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.data_sources.with_raw_response.list(
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        data_source = response.parse()
-        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.data_sources.with_streaming_response.list(
-            knowledge_base_uuid="knowledge_base_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            data_source = response.parse()
-            assert_matches_type(DataSourceListResponse, data_source, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.knowledge_bases.data_sources.with_raw_response.list(
-                knowledge_base_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        data_source = client.knowledge_bases.data_sources.delete(
-            data_source_uuid="data_source_uuid",
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.data_sources.with_raw_response.delete(
-            data_source_uuid="data_source_uuid",
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        data_source = response.parse()
-        assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.data_sources.with_streaming_response.delete(
-            data_source_uuid="data_source_uuid",
-            knowledge_base_uuid="knowledge_base_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            data_source = response.parse()
-            assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            client.knowledge_bases.data_sources.with_raw_response.delete(
-                data_source_uuid="data_source_uuid",
-                knowledge_base_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
-            client.knowledge_bases.data_sources.with_raw_response.delete(
-                data_source_uuid="",
-                knowledge_base_uuid="knowledge_base_uuid",
-            )
-
-
-class TestAsyncDataSources:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        data_source = await async_client.knowledge_bases.data_sources.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        data_source = await async_client.knowledge_bases.data_sources.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-            aws_data_source={
-                "bucket_name": "bucket_name",
-                "item_path": "item_path",
-                "key_id": "key_id",
-                "region": "region",
-                "secret_key": "secret_key",
-            },
-            body_knowledge_base_uuid="knowledge_base_uuid",
-            spaces_data_source={
-                "bucket_name": "bucket_name",
-                "item_path": "item_path",
-                "region": "region",
-            },
-            web_crawler_data_source={
-                "base_url": "base_url",
-                "crawling_option": "UNKNOWN",
-                "embed_media": True,
-            },
-        )
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.knowledge_bases.data_sources.with_raw_response.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        data_source = await response.parse()
-        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.knowledge_bases.data_sources.with_streaming_response.create(
-            path_knowledge_base_uuid="knowledge_base_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            data_source = await response.parse()
-            assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(
received ''" - ): - await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - page=0, - per_page=0, - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = await response.parse() - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = await response.parse() - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = await response.parse() - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
-            data_source = await response.parse()
-            assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
-            await async_client.knowledge_bases.data_sources.with_raw_response.delete(
-                data_source_uuid="data_source_uuid",
-                knowledge_base_uuid="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"):
-            await async_client.knowledge_bases.data_sources.with_raw_response.delete(
-                data_source_uuid="",
-                knowledge_base_uuid="knowledge_base_uuid",
-            )
diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/providers/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/providers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/providers/anthropic/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
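Every test class in these deleted files reuses the same marker: `pytest.mark.parametrize(..., [False, True], indirect=True, ids=["loose", "strict"])`, so each case runs once against a loose client and once against a strict one. A sketch of the fixture shape this implies; the real fixtures live in the suite's conftest.py (not part of this diff), and `_strict_response_validation` is assumed here as the usual Stainless flag for schema-checking responses:

    import os

    import pytest

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK

    base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


    @pytest.fixture
    def client(request: pytest.FixtureRequest) -> DigitaloceanGenaiSDK:
        # request.param arrives via indirect=True: False -> "loose", True -> "strict".
        return DigitaloceanGenaiSDK(
            base_url=base_url,
            api_key="My API Key",
            _strict_response_validation=bool(request.param),
        )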
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py
deleted file mode 100644
index c5491bd4..00000000
--- a/tests/api_resources/providers/anthropic/test_keys.py
+++ /dev/null
@@ -1,555 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.providers.anthropic import (
-    KeyListResponse,
-    KeyCreateResponse,
-    KeyDeleteResponse,
-    KeyUpdateResponse,
-    KeyRetrieveResponse,
-    KeyListAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKeys:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.create()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.create(
-            api_key="api_key",
-            name="name",
-        )
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.retrieve(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.retrieve(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.providers.anthropic.keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.update(
-            path_api_key_uuid="api_key_uuid",
-            api_key="api_key",
-            body_api_key_uuid="api_key_uuid",
-            name="name",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.update(
-            path_api_key_uuid="api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.providers.anthropic.keys.with_raw_response.update(
-                path_api_key_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.list()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyListResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.delete(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.delete(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.delete(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.providers.anthropic.keys.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.list_agents(
-            uuid="uuid",
-        )
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.anthropic.keys.list_agents(
-            uuid="uuid",
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.anthropic.keys.with_raw_response.list_agents(
-            uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.anthropic.keys.with_streaming_response.list_agents(
-            uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_list_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.providers.anthropic.keys.with_raw_response.list_agents(
-                uuid="",
-            )
-
-
-class TestAsyncKeys:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.create()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.create(
-            api_key="api_key",
-            name="name",
-        )
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.retrieve(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.retrieve(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.providers.anthropic.keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.update(
-            path_api_key_uuid="api_key_uuid",
-            api_key="api_key",
-            body_api_key_uuid="api_key_uuid",
-            name="name",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.update(
-            path_api_key_uuid="api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            await async_client.providers.anthropic.keys.with_raw_response.update(
-                path_api_key_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.list()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyListResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.delete(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.delete(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.delete(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.providers.anthropic.keys.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.list_agents(
-            uuid="uuid",
-        )
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.anthropic.keys.list_agents(
-            uuid="uuid",
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.anthropic.keys.with_raw_response.list_agents(
-            uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.anthropic.keys.with_streaming_response.list_agents(
-            uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.providers.anthropic.keys.with_raw_response.list_agents(
-                uuid="",
-            )
diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/providers/openai/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
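The repeating trio of `test_method_*`, `test_raw_response_*`, and `test_streaming_response_*` cases covers the three ways every generated endpoint can be invoked. Roughly, for the retrieve endpoint tested above (placeholder UUID; the same client-constructor assumption as in the earlier sketch):

    from digitalocean_genai_sdk import DigitaloceanGenaiSDK

    client = DigitaloceanGenaiSDK(api_key="My API Key", base_url="http://127.0.0.1:4010")

    # 1. Plain call: returns the parsed response model directly.
    key = client.providers.anthropic.keys.retrieve("api_key_uuid")

    # 2. Raw access: the body is read eagerly; parse() yields the same model,
    #    while status and headers stay available on the wrapper object.
    raw = client.providers.anthropic.keys.with_raw_response.retrieve("api_key_uuid")
    key = raw.parse()

    # 3. Streaming access: the body is not read until parse() is called inside
    #    the context block, and the connection is closed on exit.
    with client.providers.anthropic.keys.with_streaming_response.retrieve("api_key_uuid") as streamed:
        key = streamed.parse()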
diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py
deleted file mode 100644
index b88b6a5f..00000000
--- a/tests/api_resources/providers/openai/test_keys.py
+++ /dev/null
@@ -1,555 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types.providers.openai import (
-    KeyListResponse,
-    KeyCreateResponse,
-    KeyDeleteResponse,
-    KeyUpdateResponse,
-    KeyRetrieveResponse,
-    KeyRetrieveAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKeys:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.create()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.create(
-            api_key="api_key",
-            name="name",
-        )
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.retrieve(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.retrieve(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.providers.openai.keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.update(
-            path_api_key_uuid="api_key_uuid",
-            api_key="api_key",
-            body_api_key_uuid="api_key_uuid",
-            name="name",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.update(
-            path_api_key_uuid="api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.providers.openai.keys.with_raw_response.update(
-                path_api_key_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.list()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyListResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.delete(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.delete(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.delete(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.providers.openai.keys.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.retrieve_agents(
-            uuid="uuid",
-        )
-        assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        key = client.providers.openai.keys.retrieve_agents(
-            uuid="uuid",
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.providers.openai.keys.with_raw_response.retrieve_agents(
-            uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = response.parse()
-        assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.providers.openai.keys.with_streaming_response.retrieve_agents(
-            uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = response.parse()
-            assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.providers.openai.keys.with_raw_response.retrieve_agents(
-                uuid="",
-            )
-
-
-class TestAsyncKeys:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.create()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.create(
-            api_key="api_key",
-            name="name",
-        )
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.openai.keys.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.openai.keys.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyCreateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.retrieve(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.openai.keys.with_raw_response.retrieve(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.openai.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            await async_client.providers.openai.keys.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.update(
-            path_api_key_uuid="api_key_uuid",
-            api_key="api_key",
-            body_api_key_uuid="api_key_uuid",
-            name="name",
-        )
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.openai.keys.with_raw_response.update(
-            path_api_key_uuid="api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.openai.keys.with_streaming_response.update(
-            path_api_key_uuid="api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            await async_client.providers.openai.keys.with_raw_response.update(
-                path_api_key_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.list()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.openai.keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyListResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.openai.keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyListResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        key = await async_client.providers.openai.keys.delete(
-            "api_key_uuid",
-        )
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.providers.openai.keys.with_raw_response.delete(
-            "api_key_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        key = await response.parse()
-        assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.providers.openai.keys.with_streaming_response.delete(
-            "api_key_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            key = await response.parse()
-            assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="", - ) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py deleted file mode 100644 index 3aafae23..00000000 --- a/tests/api_resources/test_agents.py +++ /dev/null @@ -1,597 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    AgentListResponse,
-    AgentCreateResponse,
-    AgentDeleteResponse,
-    AgentUpdateResponse,
-    AgentRetrieveResponse,
-    AgentUpdateStatusResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAgents:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.create()
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.create(
-            anthropic_key_uuid="anthropic_key_uuid",
-            description="description",
-            instruction="instruction",
-            knowledge_base_uuid=["string"],
-            model_uuid="model_uuid",
-            name="name",
-            openai_key_uuid="open_ai_key_uuid",
-            project_id="project_id",
-            region="region",
-            tags=["string"],
-        )
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.retrieve(
-            "uuid",
-        )
-        assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.agents.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.update(
-            path_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.update(
-            path_uuid="uuid",
-            anthropic_key_uuid="anthropic_key_uuid",
-            description="description",
-            instruction="instruction",
-            k=0,
-            max_tokens=0,
-            model_uuid="model_uuid",
-            name="name",
-            openai_key_uuid="open_ai_key_uuid",
-            project_id="project_id",
-            provide_citations=True,
-            retrieval_method="RETRIEVAL_METHOD_UNKNOWN",
-            tags=["string"],
-            temperature=0,
-            top_p=0,
-            body_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.update(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.update(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            client.agents.with_raw_response.update(
-                path_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.list()
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.list(
-            only_deployed=True,
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentListResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.delete(
-            "uuid",
-        )
-        assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.delete(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.delete(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.agents.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_status(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.update_status(
-            path_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_status_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        agent = client.agents.update_status(
-            path_uuid="uuid",
-            body_uuid="uuid",
-            visibility="VISIBILITY_UNKNOWN",
-        )
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update_status(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.agents.with_raw_response.update_status(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = response.parse()
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update_status(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.agents.with_streaming_response.update_status(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = response.parse()
-            assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update_status(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            client.agents.with_raw_response.update_status(
-                path_uuid="",
-            )
-
-
-class TestAsyncAgents:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.create()
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.create(
-            anthropic_key_uuid="anthropic_key_uuid",
-            description="description",
-            instruction="instruction",
-            knowledge_base_uuid=["string"],
-            model_uuid="model_uuid",
-            name="name",
-            openai_key_uuid="open_ai_key_uuid",
-            project_id="project_id",
-            region="region",
-            tags=["string"],
-        )
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentCreateResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.retrieve(
-            "uuid",
-        )
-        assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentRetrieveResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.agents.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.update(
-            path_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.update(
-            path_uuid="uuid",
-            anthropic_key_uuid="anthropic_key_uuid",
-            description="description",
-            instruction="instruction",
-            k=0,
-            max_tokens=0,
-            model_uuid="model_uuid",
-            name="name",
-            openai_key_uuid="open_ai_key_uuid",
-            project_id="project_id",
-            provide_citations=True,
-            retrieval_method="RETRIEVAL_METHOD_UNKNOWN",
-            tags=["string"],
-            temperature=0,
-            top_p=0,
-            body_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.update(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.update(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentUpdateResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            await async_client.agents.with_raw_response.update(
-                path_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.list()
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.list(
-            only_deployed=True,
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentListResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentListResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.delete(
-            "uuid",
-        )
-        assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.delete(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.delete(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentDeleteResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.agents.with_raw_response.delete(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.update_status(
-            path_uuid="uuid",
-        )
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_status_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        agent = await async_client.agents.update_status(
-            path_uuid="uuid",
-            body_uuid="uuid",
-            visibility="VISIBILITY_UNKNOWN",
-        )
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.agents.with_raw_response.update_status(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        agent = await response.parse()
-        assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.agents.with_streaming_response.update_status(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            agent = await response.parse()
-            assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            await async_client.agents.with_raw_response.update_status(
-                path_uuid="",
-            )
diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/test_api_keys.py
deleted file mode 100644
index 198eb261..00000000
--- a/tests/api_resources/test_api_keys.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import APIKeyListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAPIKeys:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.api_keys.list()
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        api_key = client.api_keys.list(
-            page=0,
-            per_page=0,
-            public_only=True,
-            usecases=["MODEL_USECASE_UNKNOWN"],
-        )
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.api_keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        api_key = response.parse()
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.api_keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            api_key = response.parse()
-            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncAPIKeys:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.api_keys.list()
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        api_key = await async_client.api_keys.list(
-            page=0,
-            per_page=0,
-            public_only=True,
-            usecases=["MODEL_USECASE_UNKNOWN"],
-        )
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.api_keys.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        api_key = await response.parse()
-        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.api_keys.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            api_key = await response.parse()
-            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_chat.py b/tests/api_resources/test_chat.py
index 0bf48414..2c5bcbd8 100644
--- a/tests/api_resources/test_chat.py
+++ b/tests/api_resources/test_chat.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import ChatCreateCompletionResponse
+from gradientai.types import ChatCreateCompletionResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -19,7 +19,7 @@ class TestChat:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_completion(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_create_completion(self, client: GradientAI) -> None:
         chat = client.chat.create_completion(
             messages=[
                 {
@@ -33,7 +33,7 @@ def test_method_create_completion(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_completion_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_create_completion_with_all_params(self, client: GradientAI) -> None:
         chat = client.chat.create_completion(
             messages=[
                 {
@@ -62,7 +62,7 @@ def test_method_create_completion_with_all_params(self, client: DigitaloceanGena
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_raw_response_create_completion(self, client: GradientAI) -> None:
         response = client.chat.with_raw_response.create_completion(
             messages=[
                 {
@@ -80,7 +80,7 @@ def test_raw_response_create_completion(self, client: DigitaloceanGenaiSDK) -> N
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_streaming_response_create_completion(self, client: GradientAI) -> None:
         with client.chat.with_streaming_response.create_completion(
             messages=[
                 {
@@ -104,7 +104,7 @@ class TestAsyncChat:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_create_completion(self, async_client: AsyncGradientAI) -> None:
         chat = await async_client.chat.create_completion(
             messages=[
                 {
@@ -118,7 +118,7 @@ async def test_method_create_completion(self, async_client: AsyncDigitaloceanGen
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_completion_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_create_completion_with_all_params(self, async_client: AsyncGradientAI) -> None:
         chat = await async_client.chat.create_completion(
             messages=[
                 {
@@ -147,7 +147,7 @@ async def test_method_create_completion_with_all_params(self, async_client: Asyn
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_raw_response_create_completion(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.chat.with_raw_response.create_completion(
             messages=[
                 {
@@ -165,7 +165,7 @@ async def test_raw_response_create_completion(self, async_client: AsyncDigitaloc
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_streaming_response_create_completion(self, async_client: AsyncGradientAI) -> None:
         async with async_client.chat.with_streaming_response.create_completion(
             messages=[
                 {
diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py
index ea1b5879..e5b394ef 100644
--- a/tests/api_resources/test_embeddings.py
+++ b/tests/api_resources/test_embeddings.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import EmbeddingCreateResponse
+from gradientai.types import EmbeddingCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -19,7 +19,7 @@ class TestEmbeddings:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_create(self, client: GradientAI) -> None:
         embedding = client.embeddings.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -28,7 +28,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
         embedding = client.embeddings.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -38,7 +38,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_raw_response_create(self, client: GradientAI) -> None:
         response = client.embeddings.with_raw_response.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -51,7 +51,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
+    def test_streaming_response_create(self, client: GradientAI) -> None:
         with client.embeddings.with_streaming_response.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -70,7 +70,7 @@ class TestAsyncEmbeddings:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
         embedding = await async_client.embeddings.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -79,7 +79,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
         embedding = await async_client.embeddings.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -89,7 +89,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.embeddings.with_raw_response.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
@@ -102,7 +102,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
         async with async_client.embeddings.with_streaming_response.create(
             input="The quick brown fox jumped over the lazy dog",
             model="text-embedding-3-small",
diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py
deleted file mode 100644
index 9ae7ec50..00000000
--- a/tests/api_resources/test_indexing_jobs.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    IndexingJobListResponse,
-    IndexingJobCreateResponse,
-    IndexingJobRetrieveResponse,
-    IndexingJobUpdateCancelResponse,
-    IndexingJobRetrieveDataSourcesResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestIndexingJobs:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.create()
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.create(
-            data_source_uuids=["string"],
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.indexing_jobs.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = response.parse()
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.indexing_jobs.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = response.parse()
-            assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.retrieve(
-            "uuid",
-        )
-        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.indexing_jobs.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = response.parse()
-        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.indexing_jobs.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = response.parse()
-            assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.indexing_jobs.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.list()
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.indexing_jobs.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = response.parse()
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.indexing_jobs.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = response.parse()
-            assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.retrieve_data_sources(
-            "indexing_job_uuid",
-        )
-        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.indexing_jobs.with_raw_response.retrieve_data_sources(
-            "indexing_job_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = response.parse()
-        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.indexing_jobs.with_streaming_response.retrieve_data_sources(
-            "indexing_job_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = response.parse()
-            assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
-            client.indexing_jobs.with_raw_response.retrieve_data_sources(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.update_cancel(
-            path_uuid="uuid",
-        )
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_cancel_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        indexing_job = client.indexing_jobs.update_cancel(
-            path_uuid="uuid",
-            body_uuid="uuid",
-        )
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.indexing_jobs.with_raw_response.update_cancel(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = response.parse()
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.indexing_jobs.with_streaming_response.update_cancel(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = response.parse()
-            assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update_cancel(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            client.indexing_jobs.with_raw_response.update_cancel(
-                path_uuid="",
-            )
-
-
-class TestAsyncIndexingJobs:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.create()
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.create(
-            data_source_uuids=["string"],
-            knowledge_base_uuid="knowledge_base_uuid",
-        )
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.indexing_jobs.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = await response.parse()
-        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.indexing_jobs.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = await response.parse()
-            assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.retrieve(
-            "uuid",
-        )
-        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.indexing_jobs.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = await response.parse()
-        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.indexing_jobs.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = await response.parse()
-            assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.indexing_jobs.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.list()
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.indexing_jobs.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = await response.parse()
-        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.indexing_jobs.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = await response.parse()
-            assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.retrieve_data_sources(
-            "indexing_job_uuid",
-        )
-        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
-            "indexing_job_uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = await response.parse()
-        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources(
-            "indexing_job_uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = await response.parse()
-            assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
-            await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.update_cancel(
-            path_uuid="uuid",
-        )
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_cancel_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        indexing_job = await async_client.indexing_jobs.update_cancel(
-            path_uuid="uuid",
-            body_uuid="uuid",
-        )
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.indexing_jobs.with_raw_response.update_cancel(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        indexing_job = await response.parse()
-        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.indexing_jobs.with_streaming_response.update_cancel(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            indexing_job = await response.parse()
-            assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            await async_client.indexing_jobs.with_raw_response.update_cancel(
-                path_uuid="",
-            )
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
deleted file mode 100644
index 34e3d753..00000000
--- a/tests/api_resources/test_knowledge_bases.py
+++ /dev/null
@@ -1,510 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK
-from digitalocean_genai_sdk.types import (
-    KnowledgeBaseListResponse,
-    KnowledgeBaseCreateResponse,
-    KnowledgeBaseDeleteResponse,
-    KnowledgeBaseUpdateResponse,
-    KnowledgeBaseRetrieveResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKnowledgeBases:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.create()
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.create(
-            database_id="database_id",
-            datasources=[
-                {
-                    "bucket_name": "bucket_name",
-                    "bucket_region": "bucket_region",
-                    "file_upload_data_source": {
-                        "original_file_name": "original_file_name",
-                        "size_in_bytes": "size_in_bytes",
-                        "stored_object_key": "stored_object_key",
-                    },
-                    "item_path": "item_path",
-                    "spaces_data_source": {
-                        "bucket_name": "bucket_name",
-                        "item_path": "item_path",
-                        "region": "region",
-                    },
-                    "web_crawler_data_source": {
-                        "base_url": "base_url",
-                        "crawling_option": "UNKNOWN",
-                        "embed_media": True,
-                    },
-                }
-            ],
-            embedding_model_uuid="embedding_model_uuid",
-            name="name",
-            project_id="project_id",
-            region="region",
-            tags=["string"],
-            vpc_uuid="vpc_uuid",
-        )
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.retrieve(
-            "uuid",
-        )
-        assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.knowledge_bases.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.update(
-            path_uuid="uuid",
-        )
-        assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.update(
-            path_uuid="uuid",
-            database_id="database_id",
-            embedding_model_uuid="embedding_model_uuid",
-            name="name",
-            project_id="project_id",
-            tags=["string"],
-            body_uuid="uuid",
-        )
-        assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.with_raw_response.update(
-            path_uuid="uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.with_streaming_response.update(
-            path_uuid="uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            client.knowledge_bases.with_raw_response.update(
-                path_uuid="",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.list()
-        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.list(
-            page=0,
-            per_page=0,
-        )
-        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        knowledge_base = client.knowledge_bases.delete(
-            "uuid",
-        )
-        assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        response = client.knowledge_bases.with_raw_response.delete(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = response.parse()
-        assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with client.knowledge_bases.with_streaming_response.delete(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = response.parse()
-            assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.knowledge_bases.with_raw_response.delete(
-                "",
-            )
-
-
-class TestAsyncKnowledgeBases:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.knowledge_bases.create()
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.knowledge_bases.create(
-            database_id="database_id",
-            datasources=[
-                {
-                    "bucket_name": "bucket_name",
-                    "bucket_region": "bucket_region",
-                    "file_upload_data_source": {
-                        "original_file_name": "original_file_name",
-                        "size_in_bytes": "size_in_bytes",
-                        "stored_object_key": "stored_object_key",
-                    },
-                    "item_path": "item_path",
-                    "spaces_data_source": {
-                        "bucket_name": "bucket_name",
-                        "item_path": "item_path",
-                        "region": "region",
-                    },
-                    "web_crawler_data_source": {
-                        "base_url": "base_url",
-                        "crawling_option": "UNKNOWN",
-                        "embed_media": True,
-                    },
-                }
-            ],
-            embedding_model_uuid="embedding_model_uuid",
-            name="name",
-            project_id="project_id",
-            region="region",
-            tags=["string"],
-            vpc_uuid="vpc_uuid",
-        )
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.knowledge_bases.with_raw_response.create()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = await response.parse()
-        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.knowledge_bases.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = await response.parse()
-            assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.knowledge_bases.retrieve(
-            "uuid",
-        )
-        assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response = await async_client.knowledge_bases.with_raw_response.retrieve(
-            "uuid",
-        )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        knowledge_base = await response.parse()
-        assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        async with async_client.knowledge_bases.with_streaming_response.retrieve(
-            "uuid",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            knowledge_base = await response.parse()
-            assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.knowledge_bases.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.knowledge_bases.update(
-            path_uuid="uuid",
-        )
-        assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        knowledge_base = await async_client.knowledge_bases.update(
-            path_uuid="uuid",
-            database_id="database_id",
-            embedding_model_uuid="embedding_model_uuid",
-            name="name",
-            project_id="project_id",
-            tags=["string"],
-            body_uuid="uuid",
-        )
-        assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None:
-        response
= await async_client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.knowledge_bases.with_raw_response.update( - path_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.list() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.list( - page=0, - per_page=0, - ) - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.delete( - "uuid", - ) - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.delete( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.delete( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.knowledge_bases.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 1148affb..b9559c8e 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import Model, ModelListResponse +from gradientai.types import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_retrieve(self, client: GradientAI) -> None: model = client.models.retrieve( "llama3-8b-instruct", ) @@ -27,7 +27,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_retrieve(self, client: GradientAI) -> None: response = client.models.with_raw_response.retrieve( "llama3-8b-instruct", ) @@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_retrieve(self, client: GradientAI) -> None: with client.models.with_streaming_response.retrieve( "llama3-8b-instruct", ) as response: @@ -53,7 +53,7 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): client.models.with_raw_response.retrieve( "", @@ -61,13 +61,13 @@ def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_list(self, client: GradientAI) -> None: model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_list(self, client: GradientAI) -> None: response = client.models.with_raw_response.list() assert response.is_closed is True @@ -77,7 +77,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def 
test_streaming_response_list(self, client: GradientAI) -> None: with client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -93,7 +93,7 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.retrieve( "llama3-8b-instruct", ) @@ -101,7 +101,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.with_raw_response.retrieve( "llama3-8b-instruct", ) @@ -113,7 +113,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.models.with_streaming_response.retrieve( "llama3-8b-instruct", ) as response: @@ -127,7 +127,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): await async_client.models.with_raw_response.retrieve( "", @@ -135,13 +135,13 @@ async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSD @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_list(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.with_raw_response.list() assert response.is_closed is True @@ -151,7 +151,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py deleted file mode 100644 index f36b6c63..00000000 --- a/tests/api_resources/test_regions.py +++ /dev/null @@ -1,96 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
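# For reference, the knowledge-bases suite deleted above exercised a create
# payload of the shape sketched here. This is assembled from that generated
# test, not from separate API docs: every value is a placeholder, and the
# client construction assumes the pre-rename package those tests imported.
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK(base_url="http://127.0.0.1:4010", api_key="placeholder-key")
knowledge_base = client.knowledge_bases.create(
    database_id="database_id",
    datasources=[
        {
            # A datasource may name a Spaces bucket or a web-crawler target;
            # these nested shapes mirror the deleted test verbatim.
            "spaces_data_source": {
                "bucket_name": "bucket_name",
                "item_path": "item_path",
                "region": "region",
            },
            "web_crawler_data_source": {
                "base_url": "base_url",
                "crawling_option": "UNKNOWN",
                "embed_media": True,
            },
        }
    ],
    embedding_model_uuid="embedding_model_uuid",
    name="name",
    project_id="project_id",
    region="region",
)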
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import RegionListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRegions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - region = client.regions.list() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - region = client.regions.list( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.regions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.regions.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncRegions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - region = await async_client.regions.list() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - region = await async_client.regions.list( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.regions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = await response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.regions.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = await response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/conftest.py b/tests/conftest.py index abd9aa51..04c66a33 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,14 +7,14 @@ import pytest from pytest_asyncio import 
is_async_test -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from gradientai import GradientAI, AsyncGradientAI if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("digitalocean_genai_sdk").setLevel(logging.DEBUG) +logging.getLogger("gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests @@ -32,22 +32,20 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: @pytest.fixture(scope="session") -def client(request: FixtureRequest) -> Iterator[DigitaloceanGenaiSDK]: +def client(request: FixtureRequest) -> Iterator[GradientAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - with DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + with GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client @pytest.fixture(scope="session") -async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncDigitaloceanGenaiSDK]: +async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - async with AsyncDigitaloceanGenaiSDK( - base_url=base_url, api_key=api_key, _strict_response_validation=strict - ) as client: + async with AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index d6412ded..59eee2ff 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,17 +21,12 @@ from respx import MockRouter from pydantic import ValidationError -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError -from digitalocean_genai_sdk._types import Omit -from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions -from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER -from digitalocean_genai_sdk._exceptions import ( - APIStatusError, - APITimeoutError, - DigitaloceanGenaiSDKError, - APIResponseValidationError, -) -from digitalocean_genai_sdk._base_client import ( +from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from gradientai._types import Omit +from gradientai._models import BaseModel, FinalRequestOptions +from gradientai._constants import RAW_RESPONSE_HEADER +from gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError +from gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -54,7 +49,7 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: return 0.1 -def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiSDK) -> int: +def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int: transport = client._client._transport assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) @@ -62,8 +57,8 @@ def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiS return len(pool._requests) -class TestDigitaloceanGenaiSDK: - client = DigitaloceanGenaiSDK(base_url=base_url, 
api_key=api_key, _strict_response_validation=True) +class TestGradientAI: + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: @@ -110,7 +105,7 @@ def test_copy_default_options(self) -> None: assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert client.default_headers["X-Foo"] == "bar" @@ -144,7 +139,7 @@ def test_copy_default_headers(self) -> None: client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def test_copy_default_query(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -235,10 +230,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "digitalocean_genai_sdk/_legacy_response.py", - "digitalocean_genai_sdk/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "digitalocean_genai_sdk/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -269,7 +264,7 @@ def test_request_timeout(self) -> None: assert timeout == httpx.Timeout(100.0) def test_client_timeout_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) ) @@ -280,7 +275,7 @@ def test_client_timeout_option(self) -> None: def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -290,7 +285,7 @@ def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -300,7 +295,7 @@ def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -311,7 +306,7 @@ def test_http_client_timeout_option(self) -> None: async def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): async with httpx.AsyncClient() as http_client: - DigitaloceanGenaiSDK( + GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -319,14 +314,14 @@ async def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( 
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = DigitaloceanGenaiSDK( + client2 = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -340,17 +335,17 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(DigitaloceanGenaiSDKError): + with pytest.raises(GradientAIError): with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -464,7 +459,7 @@ def test_request_extra_query(self) -> None: params = dict(request.url.params) assert params == {"foo": "2"} - def test_multipart_repeating_array(self, client: DigitaloceanGenaiSDK) -> None: + def test_multipart_repeating_array(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions.construct( method="get", @@ -551,9 +546,7 @@ class Model(BaseModel): assert response.foo == 2 def test_base_url_setter(self) -> None: - client = DigitaloceanGenaiSDK( - base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True - ) + client = GradientAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True) assert client.base_url == "https://example.com/from_init/" client.base_url = "https://example.com/from_setter" # type: ignore[assignment] @@ -561,17 +554,17 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): - client = DigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): + client = GradientAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -580,7 +573,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + def test_base_url_trailing_slash(self, client: GradientAI) -> None: request = 
client._build_request( FinalRequestOptions( method="post", @@ -593,10 +586,10 @@ def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -605,7 +598,7 @@ def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + def test_base_url_no_trailing_slash(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -618,10 +611,10 @@ def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -630,7 +623,7 @@ def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None: + def test_absolute_request_url(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -641,7 +634,7 @@ def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None: assert request.url == "https://myapi.com/foo" def test_copied_client_does_not_close_http(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -652,7 +645,7 @@ def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() def test_client_context_manager(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with client as c2: assert c2 is client assert not c2.is_closed() @@ -673,7 +666,7 @@ class Model(BaseModel): def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): - DigitaloceanGenaiSDK( + GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) ) @@ -684,12 +677,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) response = client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -717,14 +710,14 @@ class Model(BaseModel): 
) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -738,7 +731,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No assert _get_open_connections(self.client) == 0 - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -753,12 +746,12 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( self, - client: DigitaloceanGenaiSDK, + client: GradientAI, failures_before_success: int, failure_mode: Literal["status", "exception"], respx_mock: MockRouter, @@ -784,10 +777,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( - self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = client.with_options(max_retries=4) @@ -809,10 +802,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + 
@mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( - self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = client.with_options(max_retries=4) @@ -861,8 +854,8 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" -class TestAsyncDigitaloceanGenaiSDK: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) +class TestAsyncGradientAI: + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @@ -911,7 +904,7 @@ def test_copy_default_options(self) -> None: assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert client.default_headers["X-Foo"] == "bar" @@ -945,7 +938,7 @@ def test_copy_default_headers(self) -> None: client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def test_copy_default_query(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -1036,10 +1029,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "digitalocean_genai_sdk/_legacy_response.py", - "digitalocean_genai_sdk/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "digitalocean_genai_sdk/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. 
"/logging/__init__.py", ] @@ -1070,7 +1063,7 @@ async def test_request_timeout(self) -> None: assert timeout == httpx.Timeout(100.0) async def test_client_timeout_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) ) @@ -1081,7 +1074,7 @@ async def test_client_timeout_option(self) -> None: async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1091,7 +1084,7 @@ async def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1101,7 +1094,7 @@ async def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1112,7 +1105,7 @@ async def test_http_client_timeout_option(self) -> None: def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): with httpx.Client() as http_client: - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -1120,14 +1113,14 @@ def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = AsyncDigitaloceanGenaiSDK( + client2 = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -1141,17 +1134,17 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(DigitaloceanGenaiSDKError): + with pytest.raises(GradientAIError): with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, 
default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1265,7 +1258,7 @@ def test_request_extra_query(self) -> None: params = dict(request.url.params) assert params == {"foo": "2"} - def test_multipart_repeating_array(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + def test_multipart_repeating_array(self, async_client: AsyncGradientAI) -> None: request = async_client._build_request( FinalRequestOptions.construct( method="get", @@ -1352,7 +1345,7 @@ class Model(BaseModel): assert response.foo == 2 def test_base_url_setter(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True ) assert client.base_url == "https://example.com/from_init/" @@ -1362,17 +1355,17 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): - client = AsyncDigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): + client = AsyncGradientAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1381,7 +1374,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1394,10 +1387,10 @@ def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1406,7 +1399,7 @@ def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> Non ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1419,10 +1412,10 @@ def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1431,7 +1424,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def 
test_absolute_request_url(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1442,7 +1435,7 @@ def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None: assert request.url == "https://myapi.com/foo" async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -1454,7 +1447,7 @@ async def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) async with client as c2: assert c2 is client assert not c2.is_closed() @@ -1476,7 +1469,7 @@ class Model(BaseModel): async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) ) @@ -1488,12 +1481,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) response = await client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -1522,14 +1515,14 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -1543,7 +1536,7 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) assert _get_open_connections(self.client) == 0 - 
@mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -1558,13 +1551,13 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) async def test_retries_taken( self, - async_client: AsyncDigitaloceanGenaiSDK, + async_client: AsyncGradientAI, failures_before_success: int, failure_mode: Literal["status", "exception"], respx_mock: MockRouter, @@ -1590,11 +1583,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( - self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -1616,11 +1609,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( - self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -1652,8 +1645,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from digitalocean_genai_sdk._utils import asyncify - from digitalocean_genai_sdk._base_client import get_platform + from gradientai._utils import asyncify + from gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 317130ef..9d1579a8 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from digitalocean_genai_sdk._utils import deepcopy_minimal +from gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py 
b/tests/test_extract_files.py index aad87e09..2905d59c 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from digitalocean_genai_sdk._types import FileTypes -from digitalocean_genai_sdk._utils import extract_files +from gradientai._types import FileTypes +from gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index f3a07ce0..4a723313 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from digitalocean_genai_sdk._files import to_httpx_files, async_to_httpx_files +from gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 0be34866..28aff1f3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from digitalocean_genai_sdk._utils import PropertyInfo -from digitalocean_genai_sdk._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from digitalocean_genai_sdk._models import BaseModel, construct_type +from gradientai._utils import PropertyInfo +from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index 41824698..9080377b 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from digitalocean_genai_sdk._qs import Querystring, stringify +from gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 379ac794..c4e6b9d8 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from digitalocean_genai_sdk._utils import required_args +from gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 768537aa..1a8f241e 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from digitalocean_genai_sdk import BaseModel, DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk._response import ( +from gradientai import BaseModel, GradientAI, AsyncGradientAI +from gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from digitalocean_genai_sdk._streaming import Stream -from digitalocean_genai_sdk._base_client import FinalRequestOptions +from gradientai._streaming import Stream +from gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -56,7 +56,7 @@ def test_extract_response_type_binary_response() -> None: class PydanticModel(pydantic.BaseModel): ... 
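# The hunks that follow update the `.parse(to=...)` assertions for the rename:
# a model handed to `parse` must subclass the SDK's own BaseModel re-export,
# which now lives in the `gradientai` package rather than
# `digitalocean_genai_sdk`. A minimal conforming model, mirroring the
# CustomModel defined further down in this file (the class name here is
# illustrative only), is just:
from gradientai import BaseModel

class ConformingModel(BaseModel):
    foo: str
    bar: int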
-def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -68,13 +68,13 @@ def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> No with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): response.parse(to=PydanticModel) @pytest.mark.asyncio -async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -86,12 +86,12 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigi with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): await response.parse(to=PydanticModel) -def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_custom_stream(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -106,7 +106,7 @@ def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_stream(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_custom_stream(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -125,7 +125,7 @@ class CustomModel(BaseModel): bar: int -def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_custom_model(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -141,7 +141,7 @@ def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_model(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_custom_model(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -156,7 +156,7 @@ async def test_async_response_parse_custom_model(async_client: AsyncDigitalocean assert obj.bar == 2 -def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_annotated_type(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -173,7 +173,7 @@ def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None: assert obj.bar == 2 -async def test_async_response_parse_annotated_type(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_annotated_type(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -201,7 +201,7 @@ async def 
test_async_response_parse_annotated_type(async_client: AsyncDigitaloce ("FalSe", False), ], ) -def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expected: bool) -> None: +def test_response_parse_bool(client: GradientAI, content: str, expected: bool) -> None: response = APIResponse( raw=httpx.Response(200, content=content), client=client, @@ -226,7 +226,7 @@ def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expecte ("FalSe", False), ], ) -async def test_async_response_parse_bool(client: AsyncDigitaloceanGenaiSDK, content: str, expected: bool) -> None: +async def test_async_response_parse_bool(client: AsyncGradientAI, content: str, expected: bool) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=content), client=client, @@ -245,7 +245,7 @@ class OtherModel(BaseModel): @pytest.mark.parametrize("client", [False], indirect=True) # loose validation -def test_response_parse_expect_model_union_non_json_content(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_expect_model_union_non_json_content(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=client, @@ -262,9 +262,7 @@ def test_response_parse_expect_model_union_non_json_content(client: Digitalocean @pytest.mark.asyncio @pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation -async def test_async_response_parse_expect_model_union_non_json_content( - async_client: AsyncDigitaloceanGenaiSDK, -) -> None: +async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=async_client, diff --git a/tests/test_streaming.py b/tests/test_streaming.py index e707c674..cdb41a77 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,13 +5,13 @@ import httpx import pytest -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk._streaming import Stream, AsyncStream, ServerSentEvent +from gradientai import GradientAI, AsyncGradientAI +from gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_basic(sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_basic(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: completion\n" yield b'data: {"foo":true}\n' @@ -28,9 +28,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_data_missing_event( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_data_missing_event(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b'data: {"foo":true}\n' yield b"\n" @@ -46,9 +44,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_event_missing_data( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_event_missing_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> 
Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -64,9 +60,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_events(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -88,9 +82,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events_with_data( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_events_with_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo":true}\n' @@ -115,7 +107,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_multiple_data_lines_with_empty_line( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK + sync: bool, client: GradientAI, async_client: AsyncGradientAI ) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" @@ -138,9 +130,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_data_json_escaped_double_new_line( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_data_json_escaped_double_new_line(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo": "my long\\n\\ncontent"}' @@ -157,9 +147,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_data_lines( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_data_lines(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"data: {\n" @@ -179,8 +167,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_special_new_line_character( sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":" culpa"}\n' @@ -210,8 +198,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_multi_byte_character_multiple_chunks( sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":"' @@ -251,8 +239,8 @@ def make_event_iterator( content: Iterator[bytes], *, sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]: if sync: return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events() diff --git 
a/tests/test_transform.py b/tests/test_transform.py index 3c29084e..825fe048 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from digitalocean_genai_sdk._types import NOT_GIVEN, Base64FileInput -from digitalocean_genai_sdk._utils import ( +from gradientai._types import NOT_GIVEN, Base64FileInput +from gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from digitalocean_genai_sdk._compat import PYDANTIC_V2 -from digitalocean_genai_sdk._models import BaseModel +from gradientai._compat import PYDANTIC_V2 +from gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 6fe8c808..3856b2c9 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from digitalocean_genai_sdk._utils import LazyProxy +from gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 72bf3422..66ad064f 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from digitalocean_genai_sdk._utils import extract_type_var_from_base +from gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index e795e2e8..b539ed2c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from digitalocean_genai_sdk._types import Omit, NoneType -from digitalocean_genai_sdk._utils import ( +from gradientai._types import Omit, NoneType +from gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from digitalocean_genai_sdk._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from digitalocean_genai_sdk._models import BaseModel +from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From afc3b43bb5305634fbad9923b0c998f594759e54 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:13:58 +0000 Subject: [PATCH 024/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 7 + src/gradientai/resources/agents/agents.py | 272 ++++++++++++++++++ src/gradientai/types/__init__.py | 13 + src/gradientai/types/agent_create_params.py | 39 +++ src/gradientai/types/agent_create_response.py | 16 ++ src/gradientai/types/agent_list_params.py | 18 ++ src/gradientai/types/agent_list_response.py | 198 +++++++++++++ src/gradientai/types/api_agent.py | 263 +++++++++++++++++ .../types/api_agent_api_key_info.py | 22 ++ src/gradientai/types/api_agreement.py | 17 ++ .../types/api_anthropic_api_key_info.py | 22 ++ src/gradientai/types/api_indexing_job.py | 43 +++ src/gradientai/types/api_knowledge_base.py | 37 +++ src/gradientai/types/api_model.py | 57 ++++ src/gradientai/types/api_model_version.py | 15 + .../types/api_openai_api_key_info.py | 25 ++ tests/api_resources/test_agents.py | 188 ++++++++++++ 18 files changed, 1255 insertions(+), 3 deletions(-) create mode 100644 
src/gradientai/types/agent_create_params.py create mode 100644 src/gradientai/types/agent_create_response.py create mode 100644 src/gradientai/types/agent_list_params.py create mode 100644 src/gradientai/types/agent_list_response.py create mode 100644 src/gradientai/types/api_agent.py create mode 100644 src/gradientai/types/api_agent_api_key_info.py create mode 100644 src/gradientai/types/api_agreement.py create mode 100644 src/gradientai/types/api_anthropic_api_key_info.py create mode 100644 src/gradientai/types/api_indexing_job.py create mode 100644 src/gradientai/types/api_knowledge_base.py create mode 100644 src/gradientai/types/api_model.py create mode 100644 src/gradientai/types/api_model_version.py create mode 100644 src/gradientai/types/api_openai_api_key_info.py create mode 100644 tests/api_resources/test_agents.py diff --git a/.stats.yml b/.stats.yml index 652e9eac..1a1a584d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 6 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml -openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a +configured_endpoints: 8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2b5d132a76e51849a4fdbb2da2818132d1f8208f137acb86ee71e4a5c130154e.yml +openapi_spec_hash: 6c13968b99ef16b717854a096b6ca506 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index d05dac3c..d39072be 100644 --- a/api.md +++ b/api.md @@ -11,9 +11,16 @@ from gradientai.types import ( APIModel, APIOpenAIAPIKeyInfo, APIRetrievalMethod, + AgentCreateResponse, + AgentListResponse, ) ``` +Methods: + +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.list(\*\*params) -> AgentListResponse + ## Versions Types: diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 9896d179..b42dc03c 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -2,6 +2,13 @@ from __future__ import annotations +from typing import List + +import httpx + +from ...types import agent_list_params, agent_create_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform from .versions import ( VersionsResource, AsyncVersionsResource, @@ -12,6 +19,15 @@ ) from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agent_list_response import AgentListResponse +from ...types.agent_create_response import AgentCreateResponse __all__ = ["AgentsResource", "AsyncAgentsResource"] @@ -40,6 +56,120 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ return AgentsResourceWithStreamingResponse(self) + def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = 
NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/agents", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + class AsyncAgentsResource(AsyncAPIResource): @cached_property @@ -65,11 +195,132 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ return AsyncAgentsResourceWithStreamingResponse(self) + async def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/agents", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + async def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + class AgentsResourceWithRawResponse: def __init__(self, agents: AgentsResource) -> None: self._agents = agents + self.create = to_raw_response_wrapper( + agents.create, + ) + self.list = to_raw_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -79,6 +330,13 @@ class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents + self.create = async_to_raw_response_wrapper( + agents.create, + ) + self.list = async_to_raw_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -88,6 +346,13 @@ class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: self._agents = agents + self.create = to_streamed_response_wrapper( + agents.create, + ) + self.list = to_streamed_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -97,6 +362,13 @@ class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents + self.create = async_to_streamed_response_wrapper( + agents.create, + ) + self.list = async_to_streamed_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 7b80eca4..25d7b58d 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,11 +3,24 @@ from __future__ import annotations from .model import Model as Model +from .api_agent import APIAgent as APIAgent +from .api_model import APIModel as APIModel +from .api_agreement import APIAgreement as APIAgreement +from .api_indexing_job import APIIndexingJob as APIIndexingJob +from .agent_list_params import AgentListParams as AgentListParams +from .api_model_version import APIModelVersion as APIModelVersion +from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .agent_create_params import AgentCreateParams as AgentCreateParams +from .agent_list_response import AgentListResponse as AgentListResponse from .model_list_response import 
ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py new file mode 100644 index 00000000..58b99df7 --- /dev/null +++ b/src/gradientai/types/agent_create_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["AgentCreateParams"] + + +class AgentCreateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + knowledge_base_uuid: List[str] + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + region: str + + tags: List[str] diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py new file mode 100644 index 00000000..48545fe9 --- /dev/null +++ b/src/gradientai/types/agent_create_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentCreateResponse"] + + +class AgentCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py new file mode 100644 index 00000000..e13a10c9 --- /dev/null +++ b/src/gradientai/types/agent_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
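# Illustrative usage sketch for the `create` method wired up earlier in this
# patch: POST /v2/gen-ai/agents with an AgentCreateParams-shaped body. All
# UUIDs are placeholders and the `api_key` client kwarg is an assumption.
from gradientai import GradientAI

client = GradientAI(api_key="example-key")

resp = client.agents.create(
    name="support-bot",
    instruction="Answer product questions concisely.",
    model_uuid="00000000-0000-0000-0000-000000000000",  # hypothetical foundation model UUID
    knowledge_base_uuid=["00000000-0000-0000-0000-000000000001"],  # hypothetical
    region="tor1",
    tags=["example"],
)
# AgentCreateResponse.agent is Optional, so guard before dereferencing.
print(resp.agent.uuid if resp.agent else "no agent returned")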
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AgentListParams"] + + +class AgentListParams(TypedDict, total=False): + only_deployed: bool + """only list agents that are deployed.""" + + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py new file mode 100644 index 00000000..4cedbb39 --- /dev/null +++ b/src/gradientai/types/agent_list_response.py @@ -0,0 +1,198 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_model import APIModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = [ + "AgentListResponse", + "Agent", + "AgentChatbot", + "AgentChatbotIdentifier", + "AgentDeployment", + "AgentTemplate", + "AgentTemplateGuardrail", +] + + +class AgentChatbot(BaseModel): + button_background_color: Optional[str] = None + + logo: Optional[str] = None + + name: Optional[str] = None + + primary_color: Optional[str] = None + + secondary_color: Optional[str] = None + + starting_message: Optional[str] = None + + +class AgentChatbotIdentifier(BaseModel): + agent_chatbot_identifier: Optional[str] = None + + +class AgentDeployment(BaseModel): + created_at: Optional[datetime] = None + + name: Optional[str] = None + + status: Optional[ + Literal[ + "STATUS_UNKNOWN", + "STATUS_WAITING_FOR_DEPLOYMENT", + "STATUS_DEPLOYING", + "STATUS_RUNNING", + "STATUS_FAILED", + "STATUS_WAITING_FOR_UNDEPLOYMENT", + "STATUS_UNDEPLOYING", + "STATUS_UNDEPLOYMENT_FAILED", + "STATUS_DELETED", + ] + ] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + visibility: Optional[APIDeploymentVisibility] = None + + +class AgentTemplateGuardrail(BaseModel): + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class AgentTemplate(BaseModel): + created_at: Optional[datetime] = None + + description: Optional[str] = None + + guardrails: Optional[List[AgentTemplateGuardrail]] = None + + instruction: Optional[str] = None + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + long_description: Optional[str] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + short_description: Optional[str] = None + + summary: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class Agent(BaseModel): + chatbot: Optional[AgentChatbot] = None + + chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None + + created_at: Optional[datetime] = None + + deployment: Optional[AgentDeployment] = None + + description: Optional[str] = None + + if_case: Optional[str] = None + + instruction: Optional[str] = None + """Agent instruction. + + Instructions help your agent to perform its job effectively. 
See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: Optional[int] = None + + max_tokens: Optional[int] = None + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + """ + + model: Optional[APIModel] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. + """ + + template: Optional[AgentTemplate] = None + + top_p: Optional[float] = None + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentListResponse(BaseModel): + agents: Optional[List[Agent]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py new file mode 100644 index 00000000..d6e18ca2 --- /dev/null +++ b/src/gradientai/types/api_agent.py @@ -0,0 +1,263 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
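# Illustrative paging sketch over AgentListResponse as modeled above. Every
# field is Optional, so each access is guarded; the loop stops when a page
# comes back short instead of reading pagination metadata. Client kwargs are
# assumptions, as before.
from gradientai import GradientAI

client = GradientAI(api_key="example-key")

page, per_page = 1, 50
while True:
    resp = client.agents.list(page=page, per_page=per_page, only_deployed=True)
    batch = resp.agents or []
    for agent in batch:
        status = agent.deployment.status if agent.deployment else None
        print(agent.uuid, agent.name, status)
    if len(batch) < per_page:
        break
    page += 1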
+ +from __future__ import annotations + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_model import APIModel +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_agent_api_key_info import APIAgentAPIKeyInfo +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from .api_deployment_visibility import APIDeploymentVisibility +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = [ + "APIAgent", + "APIKey", + "Chatbot", + "ChatbotIdentifier", + "Deployment", + "Function", + "Guardrail", + "Template", + "TemplateGuardrail", +] + + +class APIKey(BaseModel): + api_key: Optional[str] = None + + +class Chatbot(BaseModel): + button_background_color: Optional[str] = None + + logo: Optional[str] = None + + name: Optional[str] = None + + primary_color: Optional[str] = None + + secondary_color: Optional[str] = None + + starting_message: Optional[str] = None + + +class ChatbotIdentifier(BaseModel): + agent_chatbot_identifier: Optional[str] = None + + +class Deployment(BaseModel): + created_at: Optional[datetime] = None + + name: Optional[str] = None + + status: Optional[ + Literal[ + "STATUS_UNKNOWN", + "STATUS_WAITING_FOR_DEPLOYMENT", + "STATUS_DEPLOYING", + "STATUS_RUNNING", + "STATUS_FAILED", + "STATUS_WAITING_FOR_UNDEPLOYMENT", + "STATUS_UNDEPLOYING", + "STATUS_UNDEPLOYMENT_FAILED", + "STATUS_DELETED", + ] + ] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + visibility: Optional[APIDeploymentVisibility] = None + + +class Function(BaseModel): + api_key: Optional[str] = None + + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + description: Optional[str] = None + + faas_name: Optional[str] = None + + faas_namespace: Optional[str] = None + + input_schema: Optional[object] = None + + name: Optional[str] = None + + output_schema: Optional[object] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Guardrail(BaseModel): + agent_uuid: Optional[str] = None + + created_at: Optional[datetime] = None + + default_response: Optional[str] = None + + description: Optional[str] = None + + guardrail_uuid: Optional[str] = None + + is_attached: Optional[bool] = None + + is_default: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + priority: Optional[int] = None + + type: Optional[ + Literal[ + "GUARDRAIL_TYPE_UNKNOWN", + "GUARDRAIL_TYPE_JAILBREAK", + "GUARDRAIL_TYPE_SENSITIVE_DATA", + "GUARDRAIL_TYPE_CONTENT_MODERATION", + ] + ] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class TemplateGuardrail(BaseModel): + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class Template(BaseModel): + created_at: Optional[datetime] = None + + description: Optional[str] = None + + guardrails: Optional[List[TemplateGuardrail]] = None + + instruction: Optional[str] = None + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + long_description: Optional[str] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + short_description: Optional[str] = None + + summary: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template_type: 
Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class APIAgent(BaseModel): + anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None + + api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + + api_keys: Optional[List[APIKey]] = None + + chatbot: Optional[Chatbot] = None + + chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None + + child_agents: Optional[List["APIAgent"]] = None + + created_at: Optional[datetime] = None + + deployment: Optional[Deployment] = None + + description: Optional[str] = None + + functions: Optional[List[Function]] = None + + guardrails: Optional[List[Guardrail]] = None + + if_case: Optional[str] = None + + instruction: Optional[str] = None + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None + + parent_agents: Optional[List["APIAgent"]] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template: Optional[Template] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py new file mode 100644 index 00000000..8dc71564 --- /dev/null +++ b/src/gradientai/types/api_agent_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["APIAgentAPIKeyInfo"] + + +class APIAgentAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + secret_key: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py new file mode 100644 index 00000000..c4359f1f --- /dev/null +++ b/src/gradientai/types/api_agreement.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
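# APIAgent is self-referential through `child_agents` / `parent_agents` (hence
# the string forward reference "APIAgent" above). A hedged sketch of walking
# that graph, with a hand-built, acyclic tree; real trees come back from the
# API already populated.
from typing import Iterator

from gradientai.types.api_agent import APIAgent


def iter_agents(agent: APIAgent) -> Iterator[APIAgent]:
    """Yield `agent` and every descendant reachable via child_agents (assumes no cycles)."""
    yield agent
    for child in agent.child_agents or []:
        yield from iter_agents(child)


root = APIAgent(name="router", child_agents=[APIAgent(name="billing"), APIAgent(name="support")])
print([a.name for a in iter_agents(root)])  # ['router', 'billing', 'support']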
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIAgreement"] + + +class APIAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py new file mode 100644 index 00000000..e2e04a8a --- /dev/null +++ b/src/gradientai/types/api_anthropic_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["APIAnthropicAPIKeyInfo"] + + +class APIAnthropicAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_indexing_job.py b/src/gradientai/types/api_indexing_job.py new file mode 100644 index 00000000..f24aac94 --- /dev/null +++ b/src/gradientai/types/api_indexing_job.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["APIIndexingJob"] + + +class APIIndexingJob(BaseModel): + completed_datasources: Optional[int] = None + + created_at: Optional[datetime] = None + + data_source_uuids: Optional[List[str]] = None + + finished_at: Optional[datetime] = None + + knowledge_base_uuid: Optional[str] = None + + phase: Optional[ + Literal[ + "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", + "BATCH_JOB_PHASE_CANCELLED", + ] + ] = None + + started_at: Optional[datetime] = None + + tokens: Optional[int] = None + + total_datasources: Optional[int] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py new file mode 100644 index 00000000..5b4b6e2c --- /dev/null +++ b/src/gradientai/types/api_knowledge_base.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["APIKnowledgeBase"] + + +class APIKnowledgeBase(BaseModel): + added_to_agent_at: Optional[datetime] = None + + created_at: Optional[datetime] = None + + database_id: Optional[str] = None + + embedding_model_uuid: Optional[str] = None + + is_public: Optional[bool] = None + + last_indexing_job: Optional[APIIndexingJob] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + region: Optional[str] = None + + tags: Optional[List[str]] = None + + updated_at: Optional[datetime] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py new file mode 100644 index 00000000..d680a638 --- /dev/null +++ b/src/gradientai/types/api_model.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
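# A small sketch of reading indexing health straight off the models above:
# APIKnowledgeBase embeds its most recent APIIndexingJob. Values are
# hand-built here; `phase` takes the Literal members defined in
# api_indexing_job.py.
from gradientai.types.api_indexing_job import APIIndexingJob
from gradientai.types.api_knowledge_base import APIKnowledgeBase

kb = APIKnowledgeBase(
    name="product-docs",
    last_indexing_job=APIIndexingJob(
        phase="BATCH_JOB_PHASE_SUCCEEDED",
        completed_datasources=3,
        total_datasources=3,
    ),
)
job = kb.last_indexing_job
if job is not None and job.phase == "BATCH_JOB_PHASE_SUCCEEDED":
    print(f"{kb.name}: indexed {job.completed_datasources}/{job.total_datasources} data sources")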
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIModel"] + + +class APIModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py new file mode 100644 index 00000000..2e118632 --- /dev/null +++ b/src/gradientai/types/api_model_version.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIModelVersion"] + + +class APIModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py new file mode 100644 index 00000000..39328f80 --- /dev/null +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_model import APIModel + +__all__ = ["APIOpenAIAPIKeyInfo"] + + +class APIOpenAIAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + models: Optional[List[APIModel]] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py new file mode 100644 index 00000000..d88d4791 --- /dev/null +++ b/tests/api_resources/test_agents.py @@ -0,0 +1,188 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
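# APIModelVersion splits the version into major/minor/patch, so a tiny helper
# renders it, and `usecases` filters against the Literal values defined above.
# Hand-built example values, purely illustrative.
from typing import Optional

from gradientai.types.api_model import APIModel
from gradientai.types.api_model_version import APIModelVersion


def semver(v: Optional[APIModelVersion]) -> str:
    if v is None:
        return "unknown"
    return f"{v.major or 0}.{v.minor or 0}.{v.patch or 0}"


m = APIModel(
    name="example-model",  # hypothetical name
    version=APIModelVersion(major=1, minor=2, patch=0),
    usecases=["MODEL_USECASE_AGENT"],
)
if m.usecases and "MODEL_USECASE_AGENT" in m.usecases:
    print(f"{m.name} v{semver(m.version)} can back an agent")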
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import AgentListResponse, AgentCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + agent = client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + agent = client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncAgents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.create() + 
assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True From 5c86463eca454303ca4d787ee4cb2f451a814775 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:14:22 +0000 Subject: [PATCH 025/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 13 + src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 32 ++ src/gradientai/resources/agents/api_keys.py | 298 ++++++++++++++++++ src/gradientai/types/agents/__init__.py | 4 + .../types/agents/api_key_create_params.py | 15 + .../types/agents/api_key_create_response.py | 12 + .../types/agents/api_key_list_params.py | 15 + .../types/agents/api_key_list_response.py | 18 ++ 
tests/api_resources/agents/test_api_keys.py | 230 ++++++++++++++ 11 files changed, 654 insertions(+), 3 deletions(-) create mode 100644 src/gradientai/resources/agents/api_keys.py create mode 100644 src/gradientai/types/agents/api_key_create_params.py create mode 100644 src/gradientai/types/agents/api_key_create_response.py create mode 100644 src/gradientai/types/agents/api_key_list_params.py create mode 100644 src/gradientai/types/agents/api_key_list_response.py create mode 100644 tests/api_resources/agents/test_api_keys.py diff --git a/.stats.yml b/.stats.yml index 1a1a584d..e46abea5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 8 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2b5d132a76e51849a4fdbb2da2818132d1f8208f137acb86ee71e4a5c130154e.yml -openapi_spec_hash: 6c13968b99ef16b717854a096b6ca506 +configured_endpoints: 10 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-dfc4c90814a9503f4796d2b0ac258becf67a135292bd57d55545430bbc125770.yml +openapi_spec_hash: 55413c66920b0f073f598043822addb5 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index d39072be..88ff5fa1 100644 --- a/api.md +++ b/api.md @@ -21,6 +21,19 @@ Methods: - client.agents.create(\*\*params) -> AgentCreateResponse - client.agents.list(\*\*params) -> AgentListResponse +## APIKeys + +Types: + +```python +from gradientai.types.agents import APIKeyCreateResponse, APIKeyListResponse +``` + +Methods: + +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse + ## Versions Types: diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 2ae2658b..a4d7d576 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -8,6 +8,14 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -18,6 +26,12 @@ ) __all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", "VersionsResource", "AsyncVersionsResource", "VersionsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index b42dc03c..30ae68da 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -9,6 +9,14 @@ from ...types import agent_list_params, agent_create_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -33,6 +41,10 @@ class AgentsResource(SyncAPIResource): + @cached_property + def api_keys(self) -> 
APIKeysResource: + return APIKeysResource(self._client) + @cached_property def versions(self) -> VersionsResource: return VersionsResource(self._client) @@ -172,6 +184,10 @@ def list( class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + @cached_property def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) @@ -321,6 +337,10 @@ def __init__(self, agents: AgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -337,6 +357,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -353,6 +377,10 @@ def __init__(self, agents: AgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -369,6 +397,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py new file mode 100644 index 00000000..ad021d34 --- /dev/null +++ b/src/gradientai/resources/agents/api_keys.py @@ -0,0 +1,298 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import api_key_list_params, api_key_create_params +from ...types.agents.api_key_list_response import APIKeyListResponse +from ...types.agents.api_key_create_response import APIKeyCreateResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index fdee8834..f07f4bfd 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -4,7 +4,11 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks +from .api_key_list_params import APIKeyListParams as APIKeyListParams from .version_list_params import VersionListParams as VersionListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py new file mode 100644 index 00000000..c3fc44cd --- /dev/null +++ b/src/gradientai/types/agents/api_key_create_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + name: str diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py new file mode 100644 index 00000000..09689fe7 --- /dev/null +++ b/src/gradientai/types/agents/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/gradientai/types/agents/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py new file mode 100644 index 00000000..eff98649 --- /dev/null +++ b/src/gradientai/types/agents/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .api_meta import APIMeta +from ..._models import BaseModel +from .api_links import APILinks +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py new file mode 100644 index 00000000..135fdb21 --- /dev/null +++ b/tests/api_resources/agents/test_api_keys.py @@ -0,0 +1,230 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import APIKeyListResponse, APIKeyCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, 
api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async 
with async_client.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) From e85e058619d9c7a841d6c6fa869add48a185bdf7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:14:46 +0000 Subject: [PATCH 026/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 9 +- src/gradientai/resources/agents/api_keys.py | 198 ++++++++++++++- src/gradientai/types/agents/__init__.py | 3 + .../types/agents/api_key_delete_response.py | 12 + .../types/agents/api_key_update_params.py | 19 ++ .../types/agents/api_key_update_response.py | 12 + tests/api_resources/agents/test_api_keys.py | 239 +++++++++++++++++- 8 files changed, 492 insertions(+), 6 deletions(-) create mode 100644 src/gradientai/types/agents/api_key_delete_response.py create mode 100644 src/gradientai/types/agents/api_key_update_params.py create mode 100644 src/gradientai/types/agents/api_key_update_response.py diff --git a/.stats.yml b/.stats.yml index e46abea5..b93ec388 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 10 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-dfc4c90814a9503f4796d2b0ac258becf67a135292bd57d55545430bbc125770.yml -openapi_spec_hash: 55413c66920b0f073f598043822addb5 +configured_endpoints: 12 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2bbf73b1efbb5271e264d160d4d802781d18b94df56050565fb0579ba06147bd.yml +openapi_spec_hash: 40cced684005d4713404e1c77f0d194f config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index 88ff5fa1..79cfe708 100644 --- a/api.md +++ b/api.md @@ -26,13 +26,20 @@ Methods: Types: ```python -from gradientai.types.agents import APIKeyCreateResponse, APIKeyListResponse +from gradientai.types.agents import ( + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, +) ``` Methods: - client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse - client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse ## Versions diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index ad021d34..51fe4866 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -15,9 +15,11 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import api_key_list_params, api_key_create_params +from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params from ...types.agents.api_key_list_response import 
APIKeyListResponse from ...types.agents.api_key_create_response import APIKeyCreateResponse +from ...types.agents.api_key_delete_response import APIKeyDeleteResponse +from ...types.agents.api_key_update_response import APIKeyUpdateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] @@ -85,6 +87,54 @@ def create( cast_to=APIKeyCreateResponse, ) + def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + def list( self, agent_uuid: str, @@ -135,6 +185,43 @@ def list( cast_to=APIKeyListResponse, ) + def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + class AsyncAPIKeysResource(AsyncAPIResource): @cached_property @@ -199,6 +286,54 @@ async def create( cast_to=APIKeyCreateResponse, ) + async def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + async def list( self, agent_uuid: str, @@ -249,6 +384,43 @@ async def list( cast_to=APIKeyListResponse, ) + async def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + class APIKeysResourceWithRawResponse: def __init__(self, api_keys: APIKeysResource) -> None: @@ -257,9 +429,15 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.create = to_raw_response_wrapper( api_keys.create, ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) self.list = to_raw_response_wrapper( api_keys.list, ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) class AsyncAPIKeysResourceWithRawResponse: @@ -269,9 +447,15 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.create = async_to_raw_response_wrapper( api_keys.create, ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) self.list = async_to_raw_response_wrapper( api_keys.list, ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) class APIKeysResourceWithStreamingResponse: @@ -281,9 +465,15 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.create = to_streamed_response_wrapper( api_keys.create, ) + self.update = to_streamed_response_wrapper( + api_keys.update, + ) self.list = to_streamed_response_wrapper( api_keys.list, ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) class AsyncAPIKeysResourceWithStreamingResponse: @@ -293,6 +483,12 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.create = async_to_streamed_response_wrapper( api_keys.create, ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) self.list = async_to_streamed_response_wrapper( api_keys.list, ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index f07f4bfd..5bb6e6a9 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -8,7 +8,10 @@ from .version_list_params import VersionListParams as VersionListParams from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py new file mode 100644 index 00000000..02b03f61 --- 
/dev/null +++ b/src/gradientai/types/agents/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py new file mode 100644 index 00000000..b49ebb38 --- /dev/null +++ b/src/gradientai/types/agents/api_key_update_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py new file mode 100644 index 00000000..87442329 --- /dev/null +++ b/src/gradientai/types/agents/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyUpdateResponse"] + + +class APIKeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 135fdb21..3eb348a7 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -9,7 +9,12 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APIKeyListResponse, APIKeyCreateResponse +from gradientai.types.agents import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -69,6 +74,70 @@ def test_path_params_create(self, client: GradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -121,6 +190,58 @@ def test_path_params_list(self, client: GradientAI) -> None: agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -177,6 +298,70 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = 
await async_client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -228,3 +413,55 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: await async_client.agents.api_keys.with_raw_response.list( agent_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + 
agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + ) From bef7a67ba8f68d837b327da7656ed25ac7e806b1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:15:14 +0000 Subject: [PATCH 027/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 14 ++ src/gradientai/resources/agents/__init__.py | 14 ++ src/gradientai/resources/agents/agents.py | 32 +++ src/gradientai/resources/agents/api_keys.py | 87 ++++++++ src/gradientai/resources/agents/functions.py | 205 ++++++++++++++++++ src/gradientai/types/agents/__init__.py | 3 + .../agents/api_key_regenerate_response.py | 12 + .../types/agents/function_create_params.py | 25 +++ .../types/agents/function_create_response.py | 16 ++ tests/api_resources/agents/test_api_keys.py | 105 +++++++++ tests/api_resources/agents/test_functions.py | 136 ++++++++++++ 12 files changed, 652 insertions(+), 3 deletions(-) create mode 100644 src/gradientai/resources/agents/functions.py create mode 100644 src/gradientai/types/agents/api_key_regenerate_response.py create mode 100644 src/gradientai/types/agents/function_create_params.py create mode 100644 src/gradientai/types/agents/function_create_response.py create mode 100644 tests/api_resources/agents/test_functions.py diff --git a/.stats.yml b/.stats.yml index b93ec388..74e07701 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 12 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2bbf73b1efbb5271e264d160d4d802781d18b94df56050565fb0579ba06147bd.yml -openapi_spec_hash: 40cced684005d4713404e1c77f0d194f +configured_endpoints: 14 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-a98eb68f96d2983dda152d72f9dfe3722ac5dcb60759328fe72858d4e3d16821.yml +openapi_spec_hash: 57506039c91b1054fdd65fe84988f1f0 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index 79cfe708..cead5153 100644 --- a/api.md +++ b/api.md @@ -31,6 +31,7 @@ from gradientai.types.agents import ( APIKeyUpdateResponse, APIKeyListResponse, APIKeyDeleteResponse, + APIKeyRegenerateResponse, ) ``` @@ -40,6 +41,19 @@ Methods: - client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse - client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse - client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse + +## Functions + +Types: + +```python +from gradientai.types.agents import FunctionCreateResponse +``` + 
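Taken together, the API-key endpoints added across these patches cover the full lifecycle: create, update, list, delete, and regenerate. A minimal usage sketch under stated assumptions: the `GradientAI()` constructor reading credentials from the environment, the agent UUID, and the `uuid` field on `APIAgentAPIKeyInfo` are all assumptions here, not values confirmed by the diffs above.

```python
# Hedged sketch of the agent API-key lifecycle; placeholder values throughout.
from gradientai import GradientAI

client = GradientAI()  # assumption: credentials are picked up from the environment
agent_uuid = "example-agent-uuid"  # hypothetical placeholder

created = client.agents.api_keys.create(path_agent_uuid=agent_uuid, name="ci-key")
# Assumption: APIAgentAPIKeyInfo exposes a `uuid` field (not shown in these diffs).
key_uuid = created.api_key_info.uuid if created.api_key_info else ""

client.agents.api_keys.update(
    path_api_key_uuid=key_uuid,
    path_agent_uuid=agent_uuid,
    name="ci-key-renamed",
)
client.agents.api_keys.regenerate(api_key_uuid=key_uuid, agent_uuid=agent_uuid)
keys = client.agents.api_keys.list(agent_uuid=agent_uuid, page=1, per_page=10)
client.agents.api_keys.delete(api_key_uuid=key_uuid, agent_uuid=agent_uuid)
```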
+Methods: + +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse ## Versions diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index a4d7d576..5502b6f2 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -24,6 +24,14 @@ VersionsResourceWithStreamingResponse, AsyncVersionsResourceWithStreamingResponse, ) +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) __all__ = [ "APIKeysResource", @@ -32,6 +40,12 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "FunctionsResource", + "AsyncFunctionsResource", + "FunctionsResourceWithRawResponse", + "AsyncFunctionsResourceWithRawResponse", + "FunctionsResourceWithStreamingResponse", + "AsyncFunctionsResourceWithStreamingResponse", "VersionsResource", "AsyncVersionsResource", "VersionsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 30ae68da..f4490cef 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -26,6 +26,14 @@ AsyncVersionsResourceWithStreamingResponse, ) from ..._compat import cached_property +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( to_raw_response_wrapper, @@ -45,6 +53,10 @@ class AgentsResource(SyncAPIResource): def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) + @cached_property + def functions(self) -> FunctionsResource: + return FunctionsResource(self._client) + @cached_property def versions(self) -> VersionsResource: return VersionsResource(self._client) @@ -188,6 +200,10 @@ class AsyncAgentsResource(AsyncAPIResource): def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) + @cached_property + def functions(self) -> AsyncFunctionsResource: + return AsyncFunctionsResource(self._client) + @cached_property def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) @@ -341,6 +357,10 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def functions(self) -> FunctionsResourceWithRawResponse: + return FunctionsResourceWithRawResponse(self._agents.functions) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -361,6 +381,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def functions(self) -> AsyncFunctionsResourceWithRawResponse: + return AsyncFunctionsResourceWithRawResponse(self._agents.functions) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -381,6 +405,10 
@@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def functions(self) -> FunctionsResourceWithStreamingResponse: + return FunctionsResourceWithStreamingResponse(self._agents.functions) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -401,6 +429,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: + return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 51fe4866..7180503f 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -20,6 +20,7 @@ from ...types.agents.api_key_create_response import APIKeyCreateResponse from ...types.agents.api_key_delete_response import APIKeyDeleteResponse from ...types.agents.api_key_update_response import APIKeyUpdateResponse +from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] @@ -222,6 +223,43 @@ def delete( cast_to=APIKeyDeleteResponse, ) + def regenerate( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + class AsyncAPIKeysResource(AsyncAPIResource): @cached_property @@ -421,6 +459,43 @@ async def delete( cast_to=APIKeyDeleteResponse, ) + async def regenerate( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + class APIKeysResourceWithRawResponse: def __init__(self, api_keys: APIKeysResource) -> None: @@ -438,6 +513,9 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.delete = to_raw_response_wrapper( api_keys.delete, ) + self.regenerate = to_raw_response_wrapper( + api_keys.regenerate, + ) class AsyncAPIKeysResourceWithRawResponse: @@ -456,6 +534,9 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.delete = async_to_raw_response_wrapper( api_keys.delete, ) + self.regenerate = async_to_raw_response_wrapper( + api_keys.regenerate, + ) class APIKeysResourceWithStreamingResponse: @@ -474,6 +555,9 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.delete = to_streamed_response_wrapper( api_keys.delete, ) + self.regenerate = to_streamed_response_wrapper( + api_keys.regenerate, + ) class AsyncAPIKeysResourceWithStreamingResponse: @@ -492,3 +576,6 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.delete = async_to_streamed_response_wrapper( api_keys.delete, ) + self.regenerate = async_to_streamed_response_wrapper( + api_keys.regenerate, + ) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py new file mode 100644 index 00000000..010c0c2c --- /dev/null +++ b/src/gradientai/resources/agents/functions.py @@ -0,0 +1,205 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import function_create_params +from ...types.agents.function_create_response import FunctionCreateResponse + +__all__ = ["FunctionsResource", "AsyncFunctionsResource"] + + +class FunctionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FunctionsResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + +class AsyncFunctionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFunctionsResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + +class FunctionsResourceWithRawResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_raw_response_wrapper( + functions.create, + ) + + +class AsyncFunctionsResourceWithRawResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_raw_response_wrapper( + functions.create, + ) + + +class FunctionsResourceWithStreamingResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_streamed_response_wrapper( + functions.create, + ) + + +class AsyncFunctionsResourceWithStreamingResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_streamed_response_wrapper( + functions.create, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 5bb6e6a9..0ae9c73c 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -11,7 +11,10 @@ from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .function_create_params import FunctionCreateParams as FunctionCreateParams from .api_key_create_response import APIKeyCreateResponse as 
APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .function_create_response import FunctionCreateResponse as FunctionCreateResponse +from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py new file mode 100644 index 00000000..ea2f761e --- /dev/null +++ b/src/gradientai/types/agents/api_key_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyRegenerateResponse"] + + +class APIKeyRegenerateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py new file mode 100644 index 00000000..938fb1d5 --- /dev/null +++ b/src/gradientai/types/agents/function_create_params.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["FunctionCreateParams"] + + +class FunctionCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + input_schema: object + + output_schema: object diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py new file mode 100644 index 00000000..82ab984b --- /dev/null +++ b/src/gradientai/types/agents/function_create_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
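
The `regenerate` method and its `APIKeyRegenerateResponse` model defined above rotate an agent API key in place. A minimal usage sketch, assuming a configured client whose credentials come from the environment; the UUID values are placeholders, not values from this patch:

```python
from gradientai import GradientAI

client = GradientAI()  # credential configuration assumed; see the project README

# Sends PUT /v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate
response = client.agents.api_keys.regenerate(
    api_key_uuid="example-api-key-uuid",  # placeholder
    agent_uuid="example-agent-uuid",  # placeholder
)
print(response.api_key_info)  # Optional[APIAgentAPIKeyInfo]; may be None
```
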
+ +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionCreateResponse"] + + +class FunctionCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 3eb348a7..e8489258 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -14,6 +14,7 @@ APIKeyCreateResponse, APIKeyDeleteResponse, APIKeyUpdateResponse, + APIKeyRegenerateResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -242,6 +243,58 @@ def test_path_params_delete(self, client: GradientAI) -> None: agent_uuid="agent_uuid", ) + @pytest.mark.skip() + @parametrize + def test_method_regenerate(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_regenerate(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_regenerate(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -465,3 +518,55 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: api_key_uuid="", agent_uuid="agent_uuid", ) + + @pytest.mark.skip() + @parametrize + async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py new file mode 100644 index 00000000..cb98e0bd --- /dev/null +++ b/tests/api_resources/agents/test_functions.py @@ -0,0 +1,136 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import FunctionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFunctions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + function = client.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + function = client.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + 
assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) + + +class TestAsyncFunctions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.functions.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) From 88b8bba76454e2ac86452cb8d07e1b87e2c255a3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:15:34 +0000 Subject: [PATCH 028/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 12 +- src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 32 +++ src/gradientai/resources/agents/functions.py | 218 ++++++++++++++- .../resources/agents/knowledge_bases.py | 165 ++++++++++++ src/gradientai/types/agents/__init__.py | 4 + .../agents/api_link_knowledge_base_output.py | 16 ++ .../types/agents/function_delete_response.py | 16 ++ .../types/agents/function_update_params.py | 29 ++ .../types/agents/function_update_response.py | 16 ++ tests/api_resources/agents/test_functions.py | 248 +++++++++++++++++- .../agents/test_knowledge_bases.py | 106 ++++++++ 13 files changed, 876 
insertions(+), 6 deletions(-) create mode 100644 src/gradientai/resources/agents/knowledge_bases.py create mode 100644 src/gradientai/types/agents/api_link_knowledge_base_output.py create mode 100644 src/gradientai/types/agents/function_delete_response.py create mode 100644 src/gradientai/types/agents/function_update_params.py create mode 100644 src/gradientai/types/agents/function_update_response.py create mode 100644 tests/api_resources/agents/test_knowledge_bases.py diff --git a/.stats.yml b/.stats.yml index 74e07701..9743a688 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 14 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-a98eb68f96d2983dda152d72f9dfe3722ac5dcb60759328fe72858d4e3d16821.yml -openapi_spec_hash: 57506039c91b1054fdd65fe84988f1f0 +configured_endpoints: 17 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-4fb25ab4cb2a89b06ad5e071dba45405224808d3208aed937c231003ab6fc5f6.yml +openapi_spec_hash: c41014abe91e4f7205d503900cd31568 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index cead5153..6a2b6456 100644 --- a/api.md +++ b/api.md @@ -48,12 +48,18 @@ Methods: Types: ```python -from gradientai.types.agents import FunctionCreateResponse +from gradientai.types.agents import ( + FunctionCreateResponse, + FunctionUpdateResponse, + FunctionDeleteResponse, +) ``` Methods: - client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions @@ -76,6 +82,10 @@ Types: from gradientai.types.agents import APILinkKnowledgeBaseOutput ``` +Methods: + +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput + # IndexingJobs Types: diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 5502b6f2..5bdea838 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -32,6 +32,14 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) __all__ = [ "APIKeysResource", @@ -52,6 +60,12 @@ "AsyncVersionsResourceWithRawResponse", "VersionsResourceWithStreamingResponse", "AsyncVersionsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index f4490cef..38444f9c 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -42,6 +42,14 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, 
+ KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) from ...types.agent_list_response import AgentListResponse from ...types.agent_create_response import AgentCreateResponse @@ -61,6 +69,10 @@ def functions(self) -> FunctionsResource: def versions(self) -> VersionsResource: return VersionsResource(self._client) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + return KnowledgeBasesResource(self._client) + @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: """ @@ -208,6 +220,10 @@ def functions(self) -> AsyncFunctionsResource: def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + return AsyncKnowledgeBasesResource(self._client) + @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ @@ -365,6 +381,10 @@ def functions(self) -> FunctionsResourceWithRawResponse: def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: + return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -389,6 +409,10 @@ def functions(self) -> AsyncFunctionsResourceWithRawResponse: def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: @@ -413,6 +437,10 @@ def functions(self) -> FunctionsResourceWithStreamingResponse: def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: + return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -436,3 +464,7 @@ def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 010c0c2c..6de9b141 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -15,8 +15,10 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import function_create_params +from ...types.agents import function_create_params, function_update_params from ...types.agents.function_create_response import FunctionCreateResponse +from ...types.agents.function_delete_response import FunctionDeleteResponse +from 
...types.agents.function_update_response import FunctionUpdateResponse __all__ = ["FunctionsResource", "AsyncFunctionsResource"] @@ -94,6 +96,101 @@ def create( cast_to=FunctionCreateResponse, ) + def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + class AsyncFunctionsResource(AsyncAPIResource): @cached_property @@ -168,6 +265,101 @@ async def create( cast_to=FunctionCreateResponse, ) + async def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + async def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + class FunctionsResourceWithRawResponse: def __init__(self, functions: FunctionsResource) -> None: @@ -176,6 +368,12 @@ def __init__(self, functions: FunctionsResource) -> None: self.create = to_raw_response_wrapper( functions.create, ) + self.update = to_raw_response_wrapper( + functions.update, + ) + self.delete = to_raw_response_wrapper( + functions.delete, + ) class AsyncFunctionsResourceWithRawResponse: @@ -185,6 +383,12 @@ def __init__(self, functions: AsyncFunctionsResource) -> None: self.create = async_to_raw_response_wrapper( functions.create, ) + self.update = async_to_raw_response_wrapper( + functions.update, + ) + self.delete = async_to_raw_response_wrapper( + functions.delete, + ) class FunctionsResourceWithStreamingResponse: @@ -194,6 +398,12 @@ def __init__(self, functions: FunctionsResource) -> None: self.create = to_streamed_response_wrapper( functions.create, ) + self.update = to_streamed_response_wrapper( + functions.update, + ) + self.delete = to_streamed_response_wrapper( + functions.delete, + ) class AsyncFunctionsResourceWithStreamingResponse: @@ -203,3 +413,9 @@ def __init__(self, functions: AsyncFunctionsResource) -> None: self.create = async_to_streamed_response_wrapper( functions.create, ) + self.update = async_to_streamed_response_wrapper( + functions.update, + ) + self.delete = async_to_streamed_response_wrapper( + functions.delete, + ) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py new file mode 100644 index 00000000..3d65228a --- /dev/null +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -0,0 +1,165 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
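
The new `update` and `delete` routes above complete the function-route lifecycle on an agent. A minimal sketch of both calls under the same assumptions as before (configured client, placeholder UUIDs and field values):

```python
from gradientai import GradientAI

client = GradientAI()  # credential configuration assumed

# PUT /v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}
updated = client.agents.functions.update(
    path_function_uuid="example-function-uuid",  # placeholder
    path_agent_uuid="example-agent-uuid",  # placeholder
    description="updated description",
)

# DELETE /v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}
deleted = client.agents.functions.delete(
    function_uuid="example-function-uuid",  # placeholder
    agent_uuid="example-agent-uuid",  # placeholder
)

# Both responses carry the (optional) updated agent representation.
print(updated.agent, deleted.agent)
```
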
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KnowledgeBasesResourceWithStreamingResponse(self) + + def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + +class AsyncKnowledgeBasesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKnowledgeBasesResourceWithStreamingResponse(self) + + async def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + +class KnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = to_raw_response_wrapper( + knowledge_bases.attach, + ) + + +class AsyncKnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_raw_response_wrapper( + knowledge_bases.attach, + ) + + +class KnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = to_streamed_response_wrapper( + knowledge_bases.attach, + ) + + +class AsyncKnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_streamed_response_wrapper( + knowledge_bases.attach, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 0ae9c73c..2a7a830e 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -12,9 +12,13 @@ from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams from .function_create_params import FunctionCreateParams as FunctionCreateParams +from .function_update_params import FunctionUpdateParams as FunctionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse +from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse +from .function_update_response import FunctionUpdateResponse as 
FunctionUpdateResponse from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse +from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py new file mode 100644 index 00000000..a38f021b --- /dev/null +++ b/src/gradientai/types/agents/api_link_knowledge_base_output.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APILinkKnowledgeBaseOutput"] + + +class APILinkKnowledgeBaseOutput(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py new file mode 100644 index 00000000..678ef62d --- /dev/null +++ b/src/gradientai/types/agents/function_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionDeleteResponse"] + + +class FunctionDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py new file mode 100644 index 00000000..2fa8e8f0 --- /dev/null +++ b/src/gradientai/types/agents/function_update_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["FunctionUpdateParams"] + + +class FunctionUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] + + input_schema: object + + output_schema: object diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py new file mode 100644 index 00000000..82fc63be --- /dev/null +++ b/src/gradientai/types/agents/function_update_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
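
Note the pattern shared by `APILinkKnowledgeBaseOutput`, `FunctionDeleteResponse`, and the `FunctionUpdateResponse` that follows: `agent` is annotated with the string `"APIAgent"` and the import is deferred to the bottom of the module, which breaks the circular dependency between the shared agent model and these per-route responses. A self-contained sketch of the same pattern using plain pydantic and hypothetical model names (not SDK code; the SDK's `BaseModel` is assumed to behave like pydantic's here):

```python
from typing import Optional

from pydantic import BaseModel


class DeleteResponse(BaseModel):
    # String forward reference: Agent is not defined at this point,
    # so no import cycle is created.
    agent: Optional["Agent"] = None


class Agent(BaseModel):
    name: str


DeleteResponse.model_rebuild()  # explicitly resolve the forward reference

resp = DeleteResponse(agent={"name": "example"})
print(resp.agent)
```
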
+ +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionUpdateResponse"] + + +class FunctionUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index cb98e0bd..bfb05fa6 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -9,7 +9,11 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import FunctionCreateResponse +from gradientai.types.agents import ( + FunctionCreateResponse, + FunctionDeleteResponse, + FunctionUpdateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -74,6 +78,127 @@ def test_path_params_create(self, client: GradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + function = client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + function = client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + client.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + function = client.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + 
assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): + client.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncFunctions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -134,3 +259,124 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: await async_client.agents.functions.with_raw_response.create( path_agent_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py new file mode 100644 index 00000000..c8b5541d --- /dev/null +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -0,0 +1,106 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
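
The `attach` method added above links knowledge bases to an agent with a single positional UUID, as the tests that follow exercise. A minimal usage sketch with a placeholder UUID and an assumed configured client:

```python
from gradientai import GradientAI

client = GradientAI()  # credential configuration assumed

# POST /v2/gen-ai/agents/{agent_uuid}/knowledge_bases
linked = client.agents.knowledge_bases.attach("example-agent-uuid")  # placeholder
print(linked.agent)  # Optional[APIAgent] on APILinkKnowledgeBaseOutput
```
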
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import APILinkKnowledgeBaseOutput + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKnowledgeBases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_attach(self, client: GradientAI) -> None: + knowledge_base = client.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_attach(self, client: GradientAI) -> None: + response = client.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_attach(self, client: GradientAI) -> None: + with client.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_attach(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.attach( + "", + ) + + +class TestAsyncKnowledgeBases: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_attach(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`agent_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.attach( + "", + ) From 1f33863c6133413f9e82755493ab991a3ba69707 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:42:56 +0000 Subject: [PATCH 029/200] feat(api): update via SDK Studio --- .stats.yml | 6 +- README.md | 14 +- api.md | 35 +- src/gradientai/_client.py | 76 ++- src/gradientai/resources/__init__.py | 28 + src/gradientai/resources/indexing_jobs.py | 543 ++++++++++++++++++ .../resources/knowledge_bases/__init__.py | 33 ++ .../resources/knowledge_bases/data_sources.py | 319 ++++++++++ .../knowledge_bases/knowledge_bases.py | 378 ++++++++++++ src/gradientai/types/__init__.py | 14 + .../types/indexing_job_create_params.py | 14 + .../types/indexing_job_create_response.py | 12 + .../types/indexing_job_list_params.py | 15 + .../types/indexing_job_list_response.py | 18 + ...xing_job_retrieve_data_sources_response.py | 52 ++ .../types/indexing_job_retrieve_response.py | 12 + .../indexing_job_update_cancel_params.py | 14 + .../indexing_job_update_cancel_response.py | 12 + .../types/knowledge_base_create_params.py | 64 +++ .../types/knowledge_base_create_response.py | 12 + .../types/knowledge_base_list_params.py | 15 + .../types/knowledge_base_list_response.py | 18 + .../types/knowledge_bases/__init__.py | 12 + .../api_file_upload_data_source.py | 15 + .../api_file_upload_data_source_param.py | 15 + .../api_knowledge_base_data_source.py | 35 ++ .../knowledge_bases/api_spaces_data_source.py | 15 + .../api_spaces_data_source_param.py | 15 + .../api_web_crawler_data_source.py | 26 + .../api_web_crawler_data_source_param.py | 25 + .../data_source_create_params.py | 33 ++ .../data_source_create_response.py | 12 + .../data_source_list_params.py | 15 + .../data_source_list_response.py | 18 + .../api_resources/knowledge_bases/__init__.py | 1 + .../knowledge_bases/test_data_sources.py | 269 +++++++++ tests/api_resources/test_indexing_jobs.py | 446 ++++++++++++++ tests/api_resources/test_knowledge_bases.py | 227 ++++++++ 38 files changed, 2867 insertions(+), 16 deletions(-) create mode 100644 src/gradientai/resources/indexing_jobs.py create mode 100644 src/gradientai/resources/knowledge_bases/__init__.py create mode 100644 src/gradientai/resources/knowledge_bases/data_sources.py create mode 100644 src/gradientai/resources/knowledge_bases/knowledge_bases.py create mode 100644 src/gradientai/types/indexing_job_create_params.py create mode 100644 src/gradientai/types/indexing_job_create_response.py create mode 100644 src/gradientai/types/indexing_job_list_params.py create mode 100644 src/gradientai/types/indexing_job_list_response.py create mode 100644 src/gradientai/types/indexing_job_retrieve_data_sources_response.py create mode 100644 src/gradientai/types/indexing_job_retrieve_response.py create mode 100644 src/gradientai/types/indexing_job_update_cancel_params.py create mode 100644 src/gradientai/types/indexing_job_update_cancel_response.py create mode 100644 src/gradientai/types/knowledge_base_create_params.py create mode 100644 src/gradientai/types/knowledge_base_create_response.py create mode 100644 src/gradientai/types/knowledge_base_list_params.py create mode 100644 src/gradientai/types/knowledge_base_list_response.py create mode 100644 src/gradientai/types/knowledge_bases/api_file_upload_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py create mode 100644 
src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_spaces_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py create mode 100644 src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_create_params.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_create_response.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_list_params.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_list_response.py create mode 100644 tests/api_resources/knowledge_bases/__init__.py create mode 100644 tests/api_resources/knowledge_bases/test_data_sources.py create mode 100644 tests/api_resources/test_indexing_jobs.py create mode 100644 tests/api_resources/test_knowledge_bases.py diff --git a/.stats.yml b/.stats.yml index 9743a688..8135f5de 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-4fb25ab4cb2a89b06ad5e071dba45405224808d3208aed937c231003ab6fc5f6.yml -openapi_spec_hash: c41014abe91e4f7205d503900cd31568 +configured_endpoints: 26 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-d8b53b5432334e3c25a01f8afa9cc6bb9213c8deb83721113ac48e0544a45c6a.yml +openapi_spec_hash: f6129f6ab890acc4ce6da26611b8fe67 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/README.md b/README.md index a6757d3a..10236f18 100644 --- a/README.md +++ b/README.md @@ -90,17 +90,11 @@ from gradientai import GradientAI client = GradientAI() -response = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - stream_options={}, +data_source = client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={}, ) -print(response.stream_options) +print(data_source.aws_data_source) ``` ## Handling errors diff --git a/api.md b/api.md index 6a2b6456..e8cac919 100644 --- a/api.md +++ b/api.md @@ -91,17 +91,41 @@ Methods: Types: ```python -from gradientai.types import APIIndexingJob +from gradientai.types import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) ``` +Methods: + +- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # KnowledgeBases Types: ```python -from gradientai.types import APIKnowledgeBase +from gradientai.types import ( + APIKnowledgeBase, + KnowledgeBaseCreateResponse, + KnowledgeBaseListResponse, +) ``` +Methods: + +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse + ## DataSources Types: @@ -112,9 +136,16 @@ from gradientai.types.knowledge_bases 
import ( APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, + DataSourceCreateResponse, + DataSourceListResponse, ) ``` +Methods: + +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse + # APIKeys Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index b22056ad..ddf7beae 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,11 +31,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, embeddings + from .resources import chat, agents, models, embeddings, indexing_jobs, knowledge_bases from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ "Timeout", @@ -110,6 +112,18 @@ def agents(self) -> AgentsResource: return AgentsResource(self) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + from .resources.indexing_jobs import IndexingJobsResource + + return IndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + from .resources.knowledge_bases import KnowledgeBasesResource + + return KnowledgeBasesResource(self) + @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -302,6 +316,18 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + from .resources.indexing_jobs import AsyncIndexingJobsResource + + return AsyncIndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + from .resources.knowledge_bases import AsyncKnowledgeBasesResource + + return AsyncKnowledgeBasesResource(self) + @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -445,6 +471,18 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse + + return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse + + return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -476,6 +514,18 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse + + return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def 
knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse + + return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -507,6 +557,18 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse + + return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse + + return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -538,6 +600,18 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse + + return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse + + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 386e2ed6..15e90bdb 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -32,6 +32,22 @@ EmbeddingsResourceWithStreamingResponse, AsyncEmbeddingsResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) __all__ = [ "AgentsResource", @@ -40,6 +56,18 @@ "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + 
"KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py new file mode 100644 index 00000000..6647d36c --- /dev/null +++ b/src/gradientai/resources/indexing_jobs.py @@ -0,0 +1,543 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List + +import httpx + +from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.indexing_job_list_response import IndexingJobListResponse +from ..types.indexing_job_create_response import IndexingJobCreateResponse +from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse + +__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] + + +class IndexingJobsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return IndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return IndexingJobsResourceWithStreamingResponse(self) + + def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobCreateResponse: + """ + To start an indexing job for a knowledge base, send a POST request to + `/v2/gen-ai/indexing_jobs`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/indexing_jobs", + body=maybe_transform( + { + "data_source_uuids": data_source_uuids, + "knowledge_base_uuid": knowledge_base_uuid, + }, + indexing_job_create_params.IndexingJobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobCreateResponse, + ) + + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveResponse: + """ + To get the status of an indexing job for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/indexing_jobs/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobListResponse: + """ + To list all indexing jobs for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/indexing_jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + indexing_job_list_params.IndexingJobListParams, + ), + ), + cast_to=IndexingJobListResponse, + ) + + def retrieve_data_sources( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveDataSourcesResponse: + """ + To list all data sources for an indexing job, send a GET request to + `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveDataSourcesResponse, + ) + + def update_cancel( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobUpdateCancelResponse: + """ + To cancel an indexing job for a knowledge base, send a PUT request to + `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. + + Args: + body_uuid: A unique identifier for an indexing job. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + body=maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class AsyncIndexingJobsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncIndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncIndexingJobsResourceWithStreamingResponse(self) + + async def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobCreateResponse: + """ + To start an indexing job for a knowledge base, send a POST request to + `/v2/gen-ai/indexing_jobs`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/indexing_jobs", + body=await async_maybe_transform( + { + "data_source_uuids": data_source_uuids, + "knowledge_base_uuid": knowledge_base_uuid, + }, + indexing_job_create_params.IndexingJobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobCreateResponse, + ) + + async def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveResponse: + """ + To get the status of an indexing job for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/indexing_jobs/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobListResponse: + """ + To list all indexing jobs for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs`. + + Args: + page: page number. + + per_page: items per page.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/indexing_jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + indexing_job_list_params.IndexingJobListParams, + ), + ), + cast_to=IndexingJobListResponse, + ) + + async def retrieve_data_sources( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveDataSourcesResponse: + """ + To list all data sources for an indexing job, send a GET request to + `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return await self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveDataSourcesResponse, + ) + + async def update_cancel( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobUpdateCancelResponse: + """ + To cancel an indexing job for a knowledge base, send a PUT request to + `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. + + Args: + body_uuid: A unique identifier for an indexing job.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + body=await async_maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class IndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class IndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py new file mode 100644 index 00000000..03d143e2 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) + +__all__ = [ + "DataSourcesResource", + "AsyncDataSourcesResource", + "DataSourcesResourceWithRawResponse", + "AsyncDataSourcesResourceWithRawResponse", + "DataSourcesResourceWithStreamingResponse", + "AsyncDataSourcesResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py new file mode 100644 index 00000000..21bde932 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -0,0 +1,319 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( + data_source_list_params, + data_source_create_params, +) +from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse +from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse +from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam +from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] + + +class DataSourcesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DataSourcesResourceWithStreamingResponse(self) + + def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + +class AsyncDataSourcesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDataSourcesResourceWithStreamingResponse(self) + + async def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
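+ + A minimal usage sketch (illustrative only; it mirrors the README example and assumes a configured async `client`; the UUID value is a placeholder): + + data_source = await client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={}, + )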
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=await async_maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + async def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + +class DataSourcesResourceWithRawResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_raw_response_wrapper( + data_sources.create, + ) + self.list = to_raw_response_wrapper( + data_sources.list, + ) + + +class AsyncDataSourcesResourceWithRawResponse: + def __init__(self, data_sources: AsyncDataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = async_to_raw_response_wrapper( + data_sources.create, + ) + self.list = async_to_raw_response_wrapper( + data_sources.list, + ) + + +class DataSourcesResourceWithStreamingResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_streamed_response_wrapper( + data_sources.create, + ) + self.list = to_streamed_response_wrapper( + data_sources.list, + ) + + +class AsyncDataSourcesResourceWithStreamingResponse: + def __init__(self, 
data_sources: AsyncDataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = async_to_streamed_response_wrapper( + data_sources.create, + ) + self.list = async_to_streamed_response_wrapper( + data_sources.list, + ) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py new file mode 100644 index 00000000..c49e23c4 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -0,0 +1,378 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable + +import httpx + +from ...types import knowledge_base_list_params, knowledge_base_create_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from ...types.knowledge_base_list_response import KnowledgeBaseListResponse +from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def data_sources(self) -> DataSourcesResource: + return DataSourcesResource(self._client) + + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KnowledgeBasesResourceWithStreamingResponse(self) + + def create( + self, + *, + database_id: str | NotGiven = NOT_GIVEN, + datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, + embedding_model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v2/gen-ai/knowledge_bases",
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/gen-ai/knowledge_bases",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    knowledge_base_list_params.KnowledgeBaseListParams,
+                ),
+            ),
+            cast_to=KnowledgeBaseListResponse,
+        )
+
+
+class AsyncKnowledgeBasesResource(AsyncAPIResource):
+    @cached_property
+    def data_sources(self) -> AsyncDataSourcesResource:
+        return AsyncDataSourcesResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncKnowledgeBasesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncKnowledgeBasesResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseCreateResponse:
+        """
+        To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+              optional. If not provided, we create a new database for the knowledge base in
+              the same region as the knowledge base.
+
+          datasources: The data sources to use for this knowledge base. See
+              [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+              for more information on data sources best practices.
+
+          embedding_model_uuid: Identifier for the
+              [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+
+          name: Name of the knowledge base.
+
+          project_id: Identifier of the DigitalOcean project this knowledge base will belong to.
+
+          region: The datacenter region to deploy the knowledge base in.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v2/gen-ai/knowledge_bases",
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "datasources": datasources,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "region": region,
+                    "tags": tags,
+                    "vpc_uuid": vpc_uuid,
+                },
+                knowledge_base_create_params.KnowledgeBaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseCreateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseListResponse:
+        """
+        To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/knowledge_bases",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    knowledge_base_list_params.KnowledgeBaseListParams,
+                ),
+            ),
+            cast_to=KnowledgeBaseListResponse,
+        )
+
+
+class KnowledgeBasesResourceWithRawResponse:
+    def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+        self._knowledge_bases = knowledge_bases
+
+        self.create = to_raw_response_wrapper(
+            knowledge_bases.create,
+        )
+        self.list = to_raw_response_wrapper(
+            knowledge_bases.list,
+        )
+
+    @cached_property
+    def data_sources(self) -> DataSourcesResourceWithRawResponse:
+        return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+
+
+class AsyncKnowledgeBasesResourceWithRawResponse:
+    def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+        self._knowledge_bases = knowledge_bases
+
+        self.create = async_to_raw_response_wrapper(
+            knowledge_bases.create,
+        )
+        self.list = async_to_raw_response_wrapper(
+            knowledge_bases.list,
+        )
+
+    @cached_property
+    def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse:
+        return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+
+
+class KnowledgeBasesResourceWithStreamingResponse:
+    def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
+        self._knowledge_bases = knowledge_bases
+
+        self.create = to_streamed_response_wrapper(
+            knowledge_bases.create,
+        )
+        self.list = to_streamed_response_wrapper(
+            knowledge_bases.list,
+        )
+
+    @cached_property
+    def data_sources(self) -> DataSourcesResourceWithStreamingResponse:
+        return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+
+
+class AsyncKnowledgeBasesResourceWithStreamingResponse:
+    def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
+        self._knowledge_bases = knowledge_bases
+
+        self.create = async_to_streamed_response_wrapper(
+            knowledge_bases.create,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            knowledge_bases.list,
+        )
+
+    @cached_property
+    def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse:
+        return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 25d7b58d..cb52748c 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -18,12 +18,26 @@
 from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
 from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
+from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
 from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
 from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
+from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
+from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
+from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
+from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
+from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
+from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
 from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams
+from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
+from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
 from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse
+from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .indexing_job_retrieve_data_sources_response import (
+    IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
+)
 from .chat_completion_request_message_content_part_text_param import (
     ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam,
 )
diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/gradientai/types/indexing_job_create_params.py
new file mode 100644
index 00000000..04838472
--- /dev/null
+++ b/src/gradientai/types/indexing_job_create_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+__all__ = ["IndexingJobCreateParams"]
+
+
+class IndexingJobCreateParams(TypedDict, total=False):
+    data_source_uuids: List[str]
+
+    knowledge_base_uuid: str
diff --git a/src/gradientai/types/indexing_job_create_response.py b/src/gradientai/types/indexing_job_create_response.py
new file mode 100644
index 00000000..839bc83b
--- /dev/null
+++ b/src/gradientai/types/indexing_job_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobCreateResponse"]
+
+
+class IndexingJobCreateResponse(BaseModel):
+    job: Optional[APIIndexingJob] = None
diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/gradientai/types/indexing_job_list_params.py
new file mode 100644
index 00000000..90206aba
--- /dev/null
+++ b/src/gradientai/types/indexing_job_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["IndexingJobListParams"]
+
+
+class IndexingJobListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py
new file mode 100644
index 00000000..1379cc55
--- /dev/null
+++ b/src/gradientai/types/indexing_job_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobListResponse"]
+
+
+class IndexingJobListResponse(BaseModel):
+    jobs: Optional[List[APIIndexingJob]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py
new file mode 100644
index 00000000..b178b984
--- /dev/null
+++ b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py
@@ -0,0 +1,52 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"]
+
+
+class IndexedDataSource(BaseModel):
+    completed_at: Optional[datetime] = None
+
+    data_source_uuid: Optional[str] = None
+
+    error_details: Optional[str] = None
+
+    error_msg: Optional[str] = None
+
+    failed_item_count: Optional[str] = None
+
+    indexed_file_count: Optional[str] = None
+
+    indexed_item_count: Optional[str] = None
+
+    removed_item_count: Optional[str] = None
+
+    skipped_item_count: Optional[str] = None
+
+    started_at: Optional[datetime] = None
+
+    status: Optional[
+        Literal[
+            "DATA_SOURCE_STATUS_UNKNOWN",
+            "DATA_SOURCE_STATUS_IN_PROGRESS",
+            "DATA_SOURCE_STATUS_UPDATED",
+            "DATA_SOURCE_STATUS_PARTIALLY_UPDATED",
+            "DATA_SOURCE_STATUS_NOT_UPDATED",
+            "DATA_SOURCE_STATUS_FAILED",
+        ]
+    ] = None
+
+    total_bytes: Optional[str] = None
+
+    total_bytes_indexed: Optional[str] = None
+
+    total_file_count: Optional[str] = None
+
+
+class IndexingJobRetrieveDataSourcesResponse(BaseModel):
+    indexed_data_sources: Optional[List[IndexedDataSource]] = None
diff --git a/src/gradientai/types/indexing_job_retrieve_response.py b/src/gradientai/types/indexing_job_retrieve_response.py
new file mode 100644
index 00000000..95f33d7a
--- /dev/null
+++ b/src/gradientai/types/indexing_job_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobRetrieveResponse"]
+
+
+class IndexingJobRetrieveResponse(BaseModel):
+    job: Optional[APIIndexingJob] = None
diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/gradientai/types/indexing_job_update_cancel_params.py
new file mode 100644
index 00000000..4c2848b0
--- /dev/null
+++ b/src/gradientai/types/indexing_job_update_cancel_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["IndexingJobUpdateCancelParams"]
+
+
+class IndexingJobUpdateCancelParams(TypedDict, total=False):
+    body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
+    """A unique identifier for an indexing job."""
diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/gradientai/types/indexing_job_update_cancel_response.py
new file mode 100644
index 00000000..d50e1865
--- /dev/null
+++ b/src/gradientai/types/indexing_job_update_cancel_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_indexing_job import APIIndexingJob
+
+__all__ = ["IndexingJobUpdateCancelResponse"]
+
+
+class IndexingJobUpdateCancelResponse(BaseModel):
+    job: Optional[APIIndexingJob] = None
diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py
new file mode 100644
index 00000000..3a58166b
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_create_params.py
@@ -0,0 +1,64 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import TypedDict
+
+from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam
+from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
+from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+
+__all__ = ["KnowledgeBaseCreateParams", "Datasource"]
+
+
+class KnowledgeBaseCreateParams(TypedDict, total=False):
+    database_id: str
+    """
+    Identifier of the DigitalOcean OpenSearch database this knowledge base will use,
+    optional. If not provided, we create a new database for the knowledge base in
+    the same region as the knowledge base.
+    """
+
+    datasources: Iterable[Datasource]
+    """The data sources to use for this knowledge base.
+
+    See
+    [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets)
+    for more information on data sources best practices.
+    """
+
+    embedding_model_uuid: str
+    """
+    Identifier for the
+    [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models).
+    """
+
+    name: str
+    """Name of the knowledge base."""
+
+    project_id: str
+    """Identifier of the DigitalOcean project this knowledge base will belong to."""
+
+    region: str
+    """The datacenter region to deploy the knowledge base in."""
+
+    tags: List[str]
+    """Tags to organize your knowledge base."""
+
+    vpc_uuid: str
+
+
+class Datasource(TypedDict, total=False):
+    bucket_name: str
+
+    bucket_region: str
+
+    file_upload_data_source: APIFileUploadDataSourceParam
+    """File to upload as data source for knowledge base."""
+
+    item_path: str
+
+    spaces_data_source: APISpacesDataSourceParam
+
+    web_crawler_data_source: APIWebCrawlerDataSourceParam
diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py
new file mode 100644
index 00000000..cc2d8b9f
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseCreateResponse"]
+
+
+class KnowledgeBaseCreateResponse(BaseModel):
+    knowledge_base: Optional[APIKnowledgeBase] = None
diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py
new file mode 100644
index 00000000..dcf9a0ec
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KnowledgeBaseListParams"]
+
+
+class KnowledgeBaseListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py
new file mode 100644
index 00000000..09ca1ad3
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseListResponse"]
+
+
+class KnowledgeBaseListResponse(BaseModel):
+    knowledge_bases: Optional[List[APIKnowledgeBase]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py
index f8ee8b14..e716e1f6 100644
--- a/src/gradientai/types/knowledge_bases/__init__.py
+++ b/src/gradientai/types/knowledge_bases/__init__.py
@@ -1,3 +1,15 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
+
+from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource
+from .data_source_list_params import DataSourceListParams as DataSourceListParams
+from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams
+from .data_source_list_response import DataSourceListResponse as DataSourceListResponse
+from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource
+from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource
+from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse
+from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource
+from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam
+from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
new file mode 100644
index 00000000..1dcc9639
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APIFileUploadDataSource"]
+
+
+class APIFileUploadDataSource(BaseModel):
+    original_file_name: Optional[str] = None
+
+    size_in_bytes: Optional[str] = None
+
+    stored_object_key: Optional[str] = None
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
new file mode 100644
index 00000000..37221059
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APIFileUploadDataSourceParam"]
+
+
+class APIFileUploadDataSourceParam(TypedDict, total=False):
+    original_file_name: str
+
+    size_in_bytes: str
+
+    stored_object_key: str
diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
new file mode 100644
index 00000000..df1cd3bb
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+from ..api_indexing_job import APIIndexingJob
+from .api_spaces_data_source import APISpacesDataSource
+from .api_file_upload_data_source import APIFileUploadDataSource
+from .api_web_crawler_data_source import APIWebCrawlerDataSource
+
+__all__ = ["APIKnowledgeBaseDataSource"]
+
+
+class APIKnowledgeBaseDataSource(BaseModel):
+    bucket_name: Optional[str] = None
+
+    created_at: Optional[datetime] = None
+
+    file_upload_data_source: Optional[APIFileUploadDataSource] = None
+    """File to upload as data source for knowledge base."""
+
+    item_path: Optional[str] = None
+
+    last_indexing_job: Optional[APIIndexingJob] = None
+
+    region: Optional[str] = None
+
+    spaces_data_source: Optional[APISpacesDataSource] = None
+
+    updated_at: Optional[datetime] = None
+
+    uuid: Optional[str] = None
+
+    web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py
new file mode 100644
index 00000000..f3a0421a
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["APISpacesDataSource"]
+
+
+class APISpacesDataSource(BaseModel):
+    bucket_name: Optional[str] = None
+
+    item_path: Optional[str] = None
+
+    region: Optional[str] = None
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
new file mode 100644
index 00000000..b7f2f657
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["APISpacesDataSourceParam"]
+
+
+class APISpacesDataSourceParam(TypedDict, total=False):
+    bucket_name: str
+
+    item_path: str
+
+    region: str
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
new file mode 100644
index 00000000..4690c607
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIWebCrawlerDataSource"]
+
+
+class APIWebCrawlerDataSource(BaseModel):
+    base_url: Optional[str] = None
+    """The base url to crawl."""
+
+    crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None
+    """Options for specifying how URLs found on pages should be handled.
+
+    - UNKNOWN: Default unknown value
+    - SCOPED: Only include the base URL.
+    - PATH: Crawl the base URL and linked pages within the URL path.
+    - DOMAIN: Crawl the base URL and linked pages within the same domain.
+    - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+    """
+
+    embed_media: Optional[bool] = None
+    """Whether to ingest and index media (images, etc.) on web pages."""
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
new file mode 100644
index 00000000..2345ed3a
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["APIWebCrawlerDataSourceParam"]
+
+
+class APIWebCrawlerDataSourceParam(TypedDict, total=False):
+    base_url: str
+    """The base url to crawl."""
+
+    crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]
+    """Options for specifying how URLs found on pages should be handled.
+
+    - UNKNOWN: Default unknown value
+    - SCOPED: Only include the base URL.
+    - PATH: Crawl the base URL and linked pages within the URL path.
+    - DOMAIN: Crawl the base URL and linked pages within the same domain.
+    - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain.
+    """
+
+    embed_media: bool
+    """Whether to ingest and index media (images, etc.) on web pages."""
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py
new file mode 100644
index 00000000..b1abafdf
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+from .api_spaces_data_source_param import APISpacesDataSourceParam
+from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+
+__all__ = ["DataSourceCreateParams", "AwsDataSource"]
+
+
+class DataSourceCreateParams(TypedDict, total=False):
+    aws_data_source: AwsDataSource
+
+    body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")]
+
+    spaces_data_source: APISpacesDataSourceParam
+
+    web_crawler_data_source: APIWebCrawlerDataSourceParam
+
+
+class AwsDataSource(TypedDict, total=False):
+    bucket_name: str
+
+    item_path: str
+
+    key_id: str
+
+    region: str
+
+    secret_key: str
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py
new file mode 100644
index 00000000..1035d3f4
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/data_source_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
+
+__all__ = ["DataSourceCreateResponse"]
+
+
+class DataSourceCreateResponse(BaseModel):
+    knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py
new file mode 100644
index 00000000..e3ed5e3c
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/data_source_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["DataSourceListParams"]
+
+
+class DataSourceListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py
new file mode 100644
index 00000000..78246ce1
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from ..agents.api_meta import APIMeta
+from ..agents.api_links import APILinks
+from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
+
+__all__ = ["DataSourceListResponse"]
+
+
+class DataSourceListResponse(BaseModel):
+    knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/tests/api_resources/knowledge_bases/__init__.py b/tests/api_resources/knowledge_bases/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/knowledge_bases/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
new file mode 100644
index 00000000..cc90a9d7
--- /dev/null
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -0,0 +1,269 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types.knowledge_bases import (
+    DataSourceListResponse,
+    DataSourceCreateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestDataSources:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create(self, client: GradientAI) -> None:
+        data_source = client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+        data_source = client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+            aws_data_source={
+                "bucket_name": "bucket_name",
+                "item_path": "item_path",
+                "key_id": "key_id",
+                "region": "region",
+                "secret_key": "secret_key",
+            },
+            body_knowledge_base_uuid="knowledge_base_uuid",
+            spaces_data_source={
+                "bucket_name": "bucket_name",
+                "item_path": "item_path",
+                "region": "region",
+            },
+            web_crawler_data_source={
+                "base_url": "base_url",
+                "crawling_option": "UNKNOWN",
+                "embed_media": True,
+            },
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_create(self, client: GradientAI) -> None:
+        response = client.knowledge_bases.data_sources.with_raw_response.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_create(self, client: GradientAI) -> None:
+        with client.knowledge_bases.data_sources.with_streaming_response.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_create(self, client: GradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+        ):
+            client.knowledge_bases.data_sources.with_raw_response.create(
+                path_knowledge_base_uuid="",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        data_source = client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        data_source = client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.knowledge_bases.data_sources.with_raw_response.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = response.parse()
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.knowledge_bases.data_sources.with_streaming_response.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = response.parse()
+            assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_list(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            client.knowledge_bases.data_sources.with_raw_response.list(
+                knowledge_base_uuid="",
+            )
+
+
+class TestAsyncDataSources:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+        data_source = await async_client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        data_source = await async_client.knowledge_bases.data_sources.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+            aws_data_source={
+                "bucket_name": "bucket_name",
+                "item_path": "item_path",
+                "key_id": "key_id",
+                "region": "region",
+                "secret_key": "secret_key",
+            },
+            body_knowledge_base_uuid="knowledge_base_uuid",
+            spaces_data_source={
+                "bucket_name": "bucket_name",
+                "item_path": "item_path",
+                "region": "region",
+            },
+            web_crawler_data_source={
+                "base_url": "base_url",
+                "crawling_option": "UNKNOWN",
+                "embed_media": True,
+            },
+        )
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.knowledge_bases.data_sources.with_raw_response.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = await response.parse()
+        assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.knowledge_bases.data_sources.with_streaming_response.create(
+            path_knowledge_base_uuid="knowledge_base_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = await response.parse()
+            assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''"
+        ):
+            await async_client.knowledge_bases.data_sources.with_raw_response.create(
+                path_knowledge_base_uuid="",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        data_source = await async_client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        data_source = await async_client.knowledge_bases.data_sources.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.knowledge_bases.data_sources.with_raw_response.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        data_source = await response.parse()
+        assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.knowledge_bases.data_sources.with_streaming_response.list(
+            knowledge_base_uuid="knowledge_base_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            data_source = await response.parse()
+            assert_matches_type(DataSourceListResponse, data_source, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            await async_client.knowledge_bases.data_sources.with_raw_response.list(
+                knowledge_base_uuid="",
+            )
diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py
new file mode 100644
index 00000000..d44a75ae
--- /dev/null
+++ b/tests/api_resources/test_indexing_jobs.py
@@ -0,0 +1,446 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types import (
+    IndexingJobListResponse,
+    IndexingJobCreateResponse,
+    IndexingJobRetrieveResponse,
+    IndexingJobUpdateCancelResponse,
+    IndexingJobRetrieveDataSourcesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestIndexingJobs:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.create()
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.create(
+            data_source_uuids=["string"],
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_create(self, client: GradientAI) -> None:
+        response = client.indexing_jobs.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = response.parse()
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_create(self, client: GradientAI) -> None:
+        with client.indexing_jobs.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = response.parse()
+            assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_retrieve(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.retrieve(
+            "uuid",
+        )
+        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+        response = client.indexing_jobs.with_raw_response.retrieve(
+            "uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = response.parse()
+        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+        with client.indexing_jobs.with_streaming_response.retrieve(
+            "uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = response.parse()
+            assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_retrieve(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+            client.indexing_jobs.with_raw_response.retrieve(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.list()
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.indexing_jobs.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = response.parse()
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.indexing_jobs.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = response.parse()
+            assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_retrieve_data_sources(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.retrieve_data_sources(
+            "indexing_job_uuid",
+        )
+        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None:
+        response = client.indexing_jobs.with_raw_response.retrieve_data_sources(
+            "indexing_job_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = response.parse()
+        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None:
+        with client.indexing_jobs.with_streaming_response.retrieve_data_sources(
+            "indexing_job_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = response.parse()
+            assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+            client.indexing_jobs.with_raw_response.retrieve_data_sources(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_cancel(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.update_cancel(
+            path_uuid="uuid",
+        )
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None:
+        indexing_job = client.indexing_jobs.update_cancel(
+            path_uuid="uuid",
+            body_uuid="uuid",
+        )
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_update_cancel(self, client: GradientAI) -> None:
+        response = client.indexing_jobs.with_raw_response.update_cancel(
+            path_uuid="uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = response.parse()
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_update_cancel(self, client: GradientAI) -> None:
+        with client.indexing_jobs.with_streaming_response.update_cancel(
+            path_uuid="uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = response.parse()
+            assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_update_cancel(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+            client.indexing_jobs.with_raw_response.update_cancel(
+                path_uuid="",
+            )
+
+
+class TestAsyncIndexingJobs:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.create()
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.create(
+            data_source_uuids=["string"],
+            knowledge_base_uuid="knowledge_base_uuid",
+        )
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.indexing_jobs.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = await response.parse()
+        assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.indexing_jobs.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = await response.parse()
+            assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.retrieve(
+            "uuid",
+        )
+        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.indexing_jobs.with_raw_response.retrieve(
+            "uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = await response.parse()
+        assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.indexing_jobs.with_streaming_response.retrieve(
+            "uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = await response.parse()
+            assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+            await async_client.indexing_jobs.with_raw_response.retrieve(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.list()
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.indexing_jobs.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = await response.parse()
+        assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.indexing_jobs.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = await response.parse()
+            assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.retrieve_data_sources(
+            "indexing_job_uuid",
+        )
+        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
+            "indexing_job_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = await response.parse()
+        assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources(
+            "indexing_job_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = await response.parse()
+            assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+            await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.update_cancel(
+            path_uuid="uuid",
+        )
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        indexing_job = await async_client.indexing_jobs.update_cancel(
+            path_uuid="uuid",
+            body_uuid="uuid",
+        )
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.indexing_jobs.with_raw_response.update_cancel(
+            path_uuid="uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        indexing_job = await response.parse()
+        assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.indexing_jobs.with_streaming_response.update_cancel(
+            path_uuid="uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            indexing_job = await response.parse()
+            assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
+            await async_client.indexing_jobs.with_raw_response.update_cancel(
+                path_uuid="",
+            )
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
new file mode 100644
index 00000000..bf761cf2
--- /dev/null
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -0,0 +1,227 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types import (
+    KnowledgeBaseListResponse,
+    KnowledgeBaseCreateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKnowledgeBases:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create(self, client: GradientAI) -> None:
+        knowledge_base = client.knowledge_bases.create()
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+        knowledge_base = client.knowledge_bases.create(
+            database_id="database_id",
+            datasources=[
+                {
+                    "bucket_name": "bucket_name",
+                    "bucket_region": "bucket_region",
+                    "file_upload_data_source": {
+                        "original_file_name": "original_file_name",
+                        "size_in_bytes": "size_in_bytes",
+                        "stored_object_key": "stored_object_key",
+                    },
+                    "item_path": "item_path",
+                    "spaces_data_source": {
+                        "bucket_name": "bucket_name",
+                        "item_path": "item_path",
+                        "region": "region",
+                    },
+                    "web_crawler_data_source": {
+                        "base_url": "base_url",
+                        "crawling_option": "UNKNOWN",
+                        "embed_media": True,
+                    },
+                }
+            ],
+            embedding_model_uuid="embedding_model_uuid",
+            name="name",
+            project_id="project_id",
+            region="region",
+            tags=["string"],
+            vpc_uuid="vpc_uuid",
+        )
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_create(self, client: GradientAI) -> None:
+        response = client.knowledge_bases.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = response.parse()
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_create(self, client: GradientAI) -> None:
+        with client.knowledge_bases.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = response.parse()
+            assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        knowledge_base = client.knowledge_bases.list()
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        knowledge_base = client.knowledge_bases.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.knowledge_bases.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = response.parse()
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.knowledge_bases.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = response.parse()
+            assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncKnowledgeBases:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.knowledge_bases.create()
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.knowledge_bases.create(
+            database_id="database_id",
+            datasources=[
+                {
+                    "bucket_name": "bucket_name",
+                    "bucket_region": "bucket_region",
+                    "file_upload_data_source": {
+                        "original_file_name": "original_file_name",
+                        "size_in_bytes": "size_in_bytes",
+                        "stored_object_key": "stored_object_key",
+                    },
+                    "item_path": "item_path",
+                    "spaces_data_source": {
+                        "bucket_name": "bucket_name",
+                        "item_path": "item_path",
+                        "region": "region",
+                    },
+                    "web_crawler_data_source": {
+                        "base_url": "base_url",
+                        "crawling_option": "UNKNOWN",
+                        "embed_media": True,
+                    },
+                }
+            ],
+            embedding_model_uuid="embedding_model_uuid",
+            name="name",
+            project_id="project_id",
+            region="region",
+            tags=["string"],
+            vpc_uuid="vpc_uuid",
+        )
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.knowledge_bases.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = await response.parse()
+        assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.knowledge_bases.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = await response.parse()
+            assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.knowledge_bases.list()
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.knowledge_bases.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.knowledge_bases.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = await response.parse()
+        assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.knowledge_bases.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = await response.parse()
+            assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True

From 15f64e2be94f9c22ee3c7bb702d1f496fc86967b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 14 Jun 2025 05:44:37 +0000
Subject: [PATCH 030/200] feat(api): update via SDK Studio

---
 .stats.yml                                    | 6 +-
 api.md                                        | 145 +++-
 src/gradientai/_client.py                     | 161 ++++-
 src/gradientai/resources/__init__.py          | 56 ++
 src/gradientai/resources/agents/__init__.py   | 14 +
 src/gradientai/resources/agents/agents.py     | 497 ++++++++++++-
 .../resources/agents/child_agents.py          | 508 ++++++++++++++
 .../resources/agents/knowledge_bases.py       | 181 +++++
 src/gradientai/resources/api_keys/__init__.py | 19 +
 src/gradientai/resources/api_keys/api_keys.py | 275 ++++++++
 .../resources/api_keys/api_keys_.py           | 529 ++++++++++++++
 src/gradientai/resources/auth/__init__.py     | 33 +
 .../resources/auth/agents/__init__.py         | 33 +
 .../resources/auth/agents/agents.py           | 102 +++
 src/gradientai/resources/auth/agents/token.py | 173 +++++
 src/gradientai/resources/auth/auth.py         | 102 +++
 .../resources/knowledge_bases/data_sources.py | 91 +++
 .../knowledge_bases/knowledge_bases.py        | 291 +++++++-
 .../resources/providers/__init__.py           | 47 ++
 .../resources/providers/anthropic/__init__.py | 33 +
 .../providers/anthropic/anthropic.py          | 102 +++
 .../resources/providers/anthropic/keys.py     | 662 ++++++++++++++++++
 .../resources/providers/openai/__init__.py    | 33 +
 .../resources/providers/openai/keys.py        | 658 +++++++++++++++++
 .../resources/providers/openai/openai.py      | 102 +++
 .../resources/providers/providers.py          | 134 ++++
 src/gradientai/resources/regions.py           | 191 +++++
 src/gradientai/types/__init__.py              | 14 +
 src/gradientai/types/agent_delete_response.py | 16 +
 .../types/agent_retrieve_response.py          | 16 +
 src/gradientai/types/agent_update_params.py   | 65 ++
 src/gradientai/types/agent_update_response.py | 16 +
 .../types/agent_update_status_params.py       | 16 +
 .../types/agent_update_status_response.py     | 16 +
 src/gradientai/types/agents/__init__.py       | 7 +
 .../types/agents/child_agent_add_params.py    | 22 +
 .../types/agents/child_agent_add_response.py  | 14 +
 .../agents/child_agent_delete_response.py     | 13 +
 .../types/agents/child_agent_update_params.py | 24 +
 .../agents/child_agent_update_response.py     | 18 +
 .../types/agents/child_agent_view_response.py | 16 +
 .../agents/knowledge_base_detach_response.py  | 16 +
 src/gradientai/types/api_key_list_params.py   | 42 ++
 src/gradientai/types/api_key_list_response.py | 42 ++
 src/gradientai/types/api_keys/__init__.py     | 10 +
 .../types/api_keys/api_key_create_params.py   | 11 +
 .../types/api_keys/api_key_create_response.py | 12 +
 .../types/api_keys/api_key_delete_response.py | 12 +
 .../types/api_keys/api_key_list_params.py     | 15 +
 .../types/api_keys/api_key_list_response.py   | 18 +
 .../types/api_keys/api_key_update_params.py   | 15 +
 .../api_key_update_regenerate_response.py     | 12 +
.../types/api_keys/api_key_update_response.py | 12 + .../types/api_keys/api_model_api_key_info.py | 22 + src/gradientai/types/auth/agents/__init__.py | 3 + .../types/auth/agents/token_create_params.py | 13 + .../auth/agents/token_create_response.py | 13 + .../types/knowledge_base_delete_response.py | 11 + .../types/knowledge_base_retrieve_response.py | 30 + .../types/knowledge_base_update_params.py | 27 + .../types/knowledge_base_update_response.py | 12 + .../types/knowledge_bases/__init__.py | 1 + .../data_source_delete_response.py | 13 + .../types/providers/anthropic/__init__.py | 11 + .../providers/anthropic/key_create_params.py | 13 + .../anthropic/key_create_response.py | 12 + .../anthropic/key_delete_response.py | 12 + .../anthropic/key_list_agents_params.py | 15 + .../anthropic/key_list_agents_response.py | 22 + .../providers/anthropic/key_list_params.py | 15 + .../providers/anthropic/key_list_response.py | 18 + .../anthropic/key_retrieve_response.py | 12 + .../providers/anthropic/key_update_params.py | 17 + .../anthropic/key_update_response.py | 12 + .../types/providers/openai/__init__.py | 11 + .../providers/openai/key_create_params.py | 13 + .../providers/openai/key_create_response.py | 12 + .../providers/openai/key_delete_response.py | 12 + .../types/providers/openai/key_list_params.py | 15 + .../providers/openai/key_list_response.py | 18 + .../openai/key_retrieve_agents_params.py | 15 + .../openai/key_retrieve_agents_response.py | 22 + .../providers/openai/key_retrieve_response.py | 12 + .../providers/openai/key_update_params.py | 17 + .../providers/openai/key_update_response.py | 12 + src/gradientai/types/region_list_params.py | 15 + src/gradientai/types/region_list_response.py | 23 + .../api_resources/agents/test_child_agents.py | 485 +++++++++++++ .../agents/test_knowledge_bases.py | 210 +++++- tests/api_resources/api_keys/__init__.py | 1 + .../api_resources/api_keys/test_api_keys_.py | 446 ++++++++++++ tests/api_resources/auth/__init__.py | 1 + tests/api_resources/auth/agents/__init__.py | 1 + tests/api_resources/auth/agents/test_token.py | 124 ++++ .../knowledge_bases/test_data_sources.py | 105 +++ tests/api_resources/providers/__init__.py | 1 + .../providers/anthropic/__init__.py | 1 + .../providers/anthropic/test_keys.py | 555 +++++++++++++++ .../providers/openai/__init__.py | 1 + .../providers/openai/test_keys.py | 555 +++++++++++++++ tests/api_resources/test_agents.py | 411 ++++++++++- tests/api_resources/test_api_keys.py | 100 +++ tests/api_resources/test_knowledge_bases.py | 283 ++++++++ tests/api_resources/test_regions.py | 96 +++ 104 files changed, 9534 insertions(+), 11 deletions(-) create mode 100644 src/gradientai/resources/agents/child_agents.py create mode 100644 src/gradientai/resources/api_keys/__init__.py create mode 100644 src/gradientai/resources/api_keys/api_keys.py create mode 100644 src/gradientai/resources/api_keys/api_keys_.py create mode 100644 src/gradientai/resources/auth/__init__.py create mode 100644 src/gradientai/resources/auth/agents/__init__.py create mode 100644 src/gradientai/resources/auth/agents/agents.py create mode 100644 src/gradientai/resources/auth/agents/token.py create mode 100644 src/gradientai/resources/auth/auth.py create mode 100644 src/gradientai/resources/providers/__init__.py create mode 100644 src/gradientai/resources/providers/anthropic/__init__.py create mode 100644 src/gradientai/resources/providers/anthropic/anthropic.py create mode 100644 src/gradientai/resources/providers/anthropic/keys.py create mode 100644 
src/gradientai/resources/providers/openai/__init__.py create mode 100644 src/gradientai/resources/providers/openai/keys.py create mode 100644 src/gradientai/resources/providers/openai/openai.py create mode 100644 src/gradientai/resources/providers/providers.py create mode 100644 src/gradientai/resources/regions.py create mode 100644 src/gradientai/types/agent_delete_response.py create mode 100644 src/gradientai/types/agent_retrieve_response.py create mode 100644 src/gradientai/types/agent_update_params.py create mode 100644 src/gradientai/types/agent_update_response.py create mode 100644 src/gradientai/types/agent_update_status_params.py create mode 100644 src/gradientai/types/agent_update_status_response.py create mode 100644 src/gradientai/types/agents/child_agent_add_params.py create mode 100644 src/gradientai/types/agents/child_agent_add_response.py create mode 100644 src/gradientai/types/agents/child_agent_delete_response.py create mode 100644 src/gradientai/types/agents/child_agent_update_params.py create mode 100644 src/gradientai/types/agents/child_agent_update_response.py create mode 100644 src/gradientai/types/agents/child_agent_view_response.py create mode 100644 src/gradientai/types/agents/knowledge_base_detach_response.py create mode 100644 src/gradientai/types/api_key_list_params.py create mode 100644 src/gradientai/types/api_key_list_response.py create mode 100644 src/gradientai/types/api_keys/api_key_create_params.py create mode 100644 src/gradientai/types/api_keys/api_key_create_response.py create mode 100644 src/gradientai/types/api_keys/api_key_delete_response.py create mode 100644 src/gradientai/types/api_keys/api_key_list_params.py create mode 100644 src/gradientai/types/api_keys/api_key_list_response.py create mode 100644 src/gradientai/types/api_keys/api_key_update_params.py create mode 100644 src/gradientai/types/api_keys/api_key_update_regenerate_response.py create mode 100644 src/gradientai/types/api_keys/api_key_update_response.py create mode 100644 src/gradientai/types/api_keys/api_model_api_key_info.py create mode 100644 src/gradientai/types/auth/agents/token_create_params.py create mode 100644 src/gradientai/types/auth/agents/token_create_response.py create mode 100644 src/gradientai/types/knowledge_base_delete_response.py create mode 100644 src/gradientai/types/knowledge_base_retrieve_response.py create mode 100644 src/gradientai/types/knowledge_base_update_params.py create mode 100644 src/gradientai/types/knowledge_base_update_response.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_delete_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_create_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_create_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_delete_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_agents_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_agents_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_retrieve_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_update_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_update_response.py create mode 100644 src/gradientai/types/providers/openai/key_create_params.py create mode 100644 
src/gradientai/types/providers/openai/key_create_response.py create mode 100644 src/gradientai/types/providers/openai/key_delete_response.py create mode 100644 src/gradientai/types/providers/openai/key_list_params.py create mode 100644 src/gradientai/types/providers/openai/key_list_response.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_agents_params.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_agents_response.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_response.py create mode 100644 src/gradientai/types/providers/openai/key_update_params.py create mode 100644 src/gradientai/types/providers/openai/key_update_response.py create mode 100644 src/gradientai/types/region_list_params.py create mode 100644 src/gradientai/types/region_list_response.py create mode 100644 tests/api_resources/agents/test_child_agents.py create mode 100644 tests/api_resources/api_keys/__init__.py create mode 100644 tests/api_resources/api_keys/test_api_keys_.py create mode 100644 tests/api_resources/auth/__init__.py create mode 100644 tests/api_resources/auth/agents/__init__.py create mode 100644 tests/api_resources/auth/agents/test_token.py create mode 100644 tests/api_resources/providers/__init__.py create mode 100644 tests/api_resources/providers/anthropic/__init__.py create mode 100644 tests/api_resources/providers/anthropic/test_keys.py create mode 100644 tests/api_resources/providers/openai/__init__.py create mode 100644 tests/api_resources/providers/openai/test_keys.py create mode 100644 tests/api_resources/test_api_keys.py create mode 100644 tests/api_resources/test_regions.py diff --git a/.stats.yml b/.stats.yml index 8135f5de..74cbd5c9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 26 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-d8b53b5432334e3c25a01f8afa9cc6bb9213c8deb83721113ac48e0544a45c6a.yml -openapi_spec_hash: f6129f6ab890acc4ce6da26611b8fe67 +configured_endpoints: 60 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml +openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index e8cac919..a3d3e8c1 100644 --- a/api.md +++ b/api.md @@ -12,14 +12,22 @@ from gradientai.types import ( APIOpenAIAPIKeyInfo, APIRetrievalMethod, AgentCreateResponse, + AgentRetrieveResponse, + AgentUpdateResponse, AgentListResponse, + AgentDeleteResponse, + AgentUpdateStatusResponse, ) ``` Methods: - client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse - client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys @@ -79,12 +87,116 @@ Methods: Types: ```python -from gradientai.types.agents import APILinkKnowledgeBaseOutput +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: - client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, 
\*, agent_uuid) -> KnowledgeBaseDetachResponse + +## ChildAgents + +Types: + +```python +from gradientai.types.agents import ( + ChildAgentUpdateResponse, + ChildAgentDeleteResponse, + ChildAgentAddResponse, + ChildAgentViewResponse, +) +``` + +Methods: + +- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse + +# Providers + +## Anthropic + +### Keys + +Types: + +```python +from gradientai.types.providers.anthropic import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + +## OpenAI + +### Keys + +Types: + +```python +from gradientai.types.providers.openai import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyRetrieveAgentsResponse, +) +``` + +Methods: + +- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse + +# Auth + +## Agents + +### Token + +Types: + +```python +from gradientai.types.auth.agents import TokenCreateResponse +``` + +Methods: + +- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse + +# Regions + +Types: + +```python +from gradientai.types import RegionListResponse +``` + +Methods: + +- client.regions.list(\*\*params) -> RegionListResponse # IndexingJobs @@ -117,14 +229,20 @@ Types: from gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, + KnowledgeBaseRetrieveResponse, + KnowledgeBaseUpdateResponse, KnowledgeBaseListResponse, + KnowledgeBaseDeleteResponse, ) ``` Methods: - client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse - client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources @@ -138,6 +256,7 @@ from gradientai.types.knowledge_bases import ( APIWebCrawlerDataSource, DataSourceCreateResponse, DataSourceListResponse, + DataSourceDeleteResponse, ) ``` @@ -145,23 +264,43 @@ Methods: - client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse - 
client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse # APIKeys Types: ```python -from gradientai.types import APIAgreement, APIModelVersion +from gradientai.types import APIAgreement, APIModelVersion, APIKeyListResponse ``` +Methods: + +- client.api_keys.list(\*\*params) -> APIKeyListResponse + ## APIKeys Types: ```python -from gradientai.types.api_keys import APIModelAPIKeyInfo +from gradientai.types.api_keys import ( + APIModelAPIKeyInfo, + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, + APIKeyUpdateRegenerateResponse, +) ``` +Methods: + +- client.api_keys.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.api_keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.api_keys.api_keys.list(\*\*params) -> APIKeyListResponse +- client.api_keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.api_keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse + # Chat Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index ddf7beae..5c0172c1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,12 +31,27 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, embeddings, indexing_jobs, knowledge_bases + from .resources import ( + auth, + chat, + agents, + models, + regions, + api_keys, + providers, + embeddings, + indexing_jobs, + knowledge_bases, + ) from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource + from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.auth.auth import AuthResource, AsyncAuthResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource + from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ @@ -112,6 +127,24 @@ def agents(self) -> AgentsResource: return AgentsResource(self) + @cached_property + def providers(self) -> ProvidersResource: + from .resources.providers import ProvidersResource + + return ProvidersResource(self) + + @cached_property + def auth(self) -> AuthResource: + from .resources.auth import AuthResource + + return AuthResource(self) + + @cached_property + def regions(self) -> RegionsResource: + from .resources.regions import RegionsResource + + return RegionsResource(self) + @cached_property def indexing_jobs(self) -> IndexingJobsResource: from .resources.indexing_jobs import IndexingJobsResource @@ -124,6 +157,12 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self) + @cached_property + def api_keys(self) -> APIKeysResource: + from .resources.api_keys import APIKeysResource + + return APIKeysResource(self) + @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -316,6 +355,24 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) + @cached_property + def providers(self) -> AsyncProvidersResource: + from
.resources.providers import AsyncProvidersResource + + return AsyncProvidersResource(self) + + @cached_property + def auth(self) -> AsyncAuthResource: + from .resources.auth import AsyncAuthResource + + return AsyncAuthResource(self) + + @cached_property + def regions(self) -> AsyncRegionsResource: + from .resources.regions import AsyncRegionsResource + + return AsyncRegionsResource(self) + @cached_property def indexing_jobs(self) -> AsyncIndexingJobsResource: from .resources.indexing_jobs import AsyncIndexingJobsResource @@ -328,6 +385,12 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self) + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + from .resources.api_keys import AsyncAPIKeysResource + + return AsyncAPIKeysResource(self) + @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -471,6 +534,24 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithRawResponse: + from .resources.providers import ProvidersResourceWithRawResponse + + return ProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithRawResponse: + from .resources.auth import AuthResourceWithRawResponse + + return AuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithRawResponse: + from .resources.regions import RegionsResourceWithRawResponse + + return RegionsResourceWithRawResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse @@ -483,6 +564,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: + from .resources.api_keys import APIKeysResourceWithRawResponse + + return APIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -514,6 +601,24 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: + from .resources.providers import AsyncProvidersResourceWithRawResponse + + return AsyncProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithRawResponse: + from .resources.auth import AsyncAuthResourceWithRawResponse + + return AsyncAuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: + from .resources.regions import AsyncRegionsResourceWithRawResponse + + return AsyncRegionsResourceWithRawResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse @@ -526,6 +631,12 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + 
@cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse + + return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -557,6 +668,24 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithStreamingResponse: + from .resources.providers import ProvidersResourceWithStreamingResponse + + return ProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithStreamingResponse: + from .resources.auth import AuthResourceWithStreamingResponse + + return AuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithStreamingResponse: + from .resources.regions import RegionsResourceWithStreamingResponse + + return RegionsResourceWithStreamingResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse @@ -569,6 +698,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: + from .resources.api_keys import APIKeysResourceWithStreamingResponse + + return APIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -600,6 +735,24 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: + from .resources.providers import AsyncProvidersResourceWithStreamingResponse + + return AsyncProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: + from .resources.auth import AsyncAuthResourceWithStreamingResponse + + return AsyncAuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: + from .resources.regions import AsyncRegionsResourceWithStreamingResponse + + return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse @@ -612,6 +765,12 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse + + return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import 
AsyncChatResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 15e90bdb..6dcbff02 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) from .chat import ( ChatResource, AsyncChatResource, @@ -24,6 +32,30 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) +from .regions import ( + RegionsResource, + AsyncRegionsResource, + RegionsResourceWithRawResponse, + AsyncRegionsResourceWithRawResponse, + RegionsResourceWithStreamingResponse, + AsyncRegionsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) from .embeddings import ( EmbeddingsResource, AsyncEmbeddingsResource, @@ -56,6 +88,24 @@ "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", "IndexingJobsResource", "AsyncIndexingJobsResource", "IndexingJobsResourceWithRawResponse", @@ -68,6 +118,12 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 5bdea838..f41a0408 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -32,6 +32,14 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -66,6 +74,12 @@ "AsyncKnowledgeBasesResourceWithRawResponse", 
"KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", + "ChildAgentsResource", + "AsyncChildAgentsResource", + "ChildAgentsResourceWithRawResponse", + "AsyncChildAgentsResourceWithRawResponse", + "ChildAgentsResourceWithStreamingResponse", + "AsyncChildAgentsResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 38444f9c..87e2aeca 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -6,7 +6,14 @@ import httpx -from ...types import agent_list_params, agent_create_params +from ...types import ( + APIRetrievalMethod, + APIDeploymentVisibility, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, +) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform from .api_keys import ( @@ -41,6 +48,14 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from .knowledge_bases import ( KnowledgeBasesResource, @@ -51,7 +66,13 @@ AsyncKnowledgeBasesResourceWithStreamingResponse, ) from ...types.agent_list_response import AgentListResponse +from ...types.api_retrieval_method import APIRetrievalMethod from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_delete_response import AgentDeleteResponse +from ...types.agent_update_response import AgentUpdateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse +from ...types.api_deployment_visibility import APIDeploymentVisibility +from ...types.agent_update_status_response import AgentUpdateStatusResponse __all__ = ["AgentsResource", "AsyncAgentsResource"] @@ -73,6 +94,10 @@ def versions(self) -> VersionsResource: def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self._client) + @cached_property + def child_agents(self) -> ChildAgentsResource: + return ChildAgentsResource(self._client) + @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: """ @@ -156,6 +181,130 @@ def create( cast_to=AgentCreateResponse, ) + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. 
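A hedged sketch of an update call that stays within the documented ranges (placeholder UUID and values):

```python
from gradientai import GradientAI

client = GradientAI()  # assumed: auth is read from the environment

# max_tokens must be 1-512; temperature and top_p sit between 0 and 1,
# per the parameter documentation above.
agent = client.agents.update(
    path_uuid="00000000-0000-0000-0000-000000000000",
    instruction="Answer billing questions concisely and cite sources.",
    max_tokens=256,
    temperature=0.2,
    top_p=0.9,
)
```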
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_uuid}", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + def list( self, *, @@ -206,6 +355,83 @@ def list( cast_to=AgentListResponse, ) + def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
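A sketch of flipping visibility; the value passed is an assumption, since this patch imports `APIDeploymentVisibility` without showing its members:

```python
from gradientai import GradientAI

client = GradientAI()  # assumed: auth is read from the environment

# "VISIBILITY_PUBLIC" is an assumed member of APIDeploymentVisibility;
# this diff does not enumerate the allowed values.
agent = client.agents.update_status(
    path_uuid="00000000-0000-0000-0000-000000000000",
    visibility="VISIBILITY_PUBLIC",
)
```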
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + body=maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + class AsyncAgentsResource(AsyncAPIResource): @cached_property @@ -224,6 +450,10 @@ def versions(self) -> AsyncVersionsResource: def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self._client) + @cached_property + def child_agents(self) -> AsyncChildAgentsResource: + return AsyncChildAgentsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ @@ -307,6 +537,130 @@ async def create( cast_to=AgentCreateResponse, ) + async def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + async def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_uuid}", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + async def list( self, *, @@ -357,6 +711,83 @@ async def list( cast_to=AgentListResponse, ) + async def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
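The async resource mirrors the sync client method-for-method, so a delete looks the same apart from `await`; a minimal sketch (placeholder UUID):

```python
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()  # assumed: auth is read from the environment
    deleted = await client.agents.delete("00000000-0000-0000-0000-000000000000")
    print(deleted)


asyncio.run(main())
```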
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + async def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + body=await async_maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + class AgentsResourceWithRawResponse: def __init__(self, agents: AgentsResource) -> None: @@ -365,9 +796,21 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_raw_response_wrapper( agents.create, ) + self.retrieve = to_raw_response_wrapper( + agents.retrieve, + ) + self.update = to_raw_response_wrapper( + agents.update, + ) self.list = to_raw_response_wrapper( agents.list, ) + self.delete = to_raw_response_wrapper( + agents.delete, + ) + self.update_status = to_raw_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> APIKeysResourceWithRawResponse: @@ -385,6 +828,10 @@ def versions(self) -> VersionsResourceWithRawResponse: def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> ChildAgentsResourceWithRawResponse: + return ChildAgentsResourceWithRawResponse(self._agents.child_agents) + class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -393,9 +840,21 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_raw_response_wrapper( agents.create, ) + self.retrieve = async_to_raw_response_wrapper( + agents.retrieve, + ) + self.update = async_to_raw_response_wrapper( + agents.update, + ) self.list = async_to_raw_response_wrapper( agents.list, ) + 
self.delete = async_to_raw_response_wrapper( + agents.delete, + ) + self.update_status = async_to_raw_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: @@ -413,6 +872,10 @@ def versions(self) -> AsyncVersionsResourceWithRawResponse: def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: + return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) + class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: @@ -421,9 +884,21 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_streamed_response_wrapper( agents.create, ) + self.retrieve = to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = to_streamed_response_wrapper( + agents.update, + ) self.list = to_streamed_response_wrapper( agents.list, ) + self.delete = to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = to_streamed_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> APIKeysResourceWithStreamingResponse: @@ -441,6 +916,10 @@ def versions(self) -> VersionsResourceWithStreamingResponse: def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: + return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -449,9 +928,21 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_streamed_response_wrapper( agents.create, ) + self.retrieve = async_to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + agents.update, + ) self.list = async_to_streamed_response_wrapper( agents.list, ) + self.delete = async_to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = async_to_streamed_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: @@ -468,3 +959,7 @@ def versions(self) -> AsyncVersionsResourceWithStreamingResponse: @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: + return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py new file mode 100644 index 00000000..1f7fe3ce --- /dev/null +++ b/src/gradientai/resources/agents/child_agents.py @@ -0,0 +1,508 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
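Before the generated ChildAgents resource below, it is worth seeing how much new client surface this commit wires up (compare the api.md and `_client.py` hunks earlier in the patch). A hedged tour with placeholder arguments; the `list()` calls rely on their parameters all being optional:

```python
from gradientai import GradientAI

client = GradientAI()  # assumed: auth is read from the environment

regions = client.regions.list()
model_keys = client.api_keys.list()

anthropic_keys = client.providers.anthropic.keys.list()
openai_keys = client.providers.openai.keys.list()

# Mint an access token for a specific agent (placeholder UUID).
token = client.auth.agents.token.create(
    path_agent_uuid="00000000-0000-0000-0000-000000000000",
)
```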
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import child_agent_add_params, child_agent_update_params +from ...types.agents.child_agent_add_response import ChildAgentAddResponse +from ...types.agents.child_agent_view_response import ChildAgentViewResponse +from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse +from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse + +__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] + + +class ChildAgentsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ChildAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ChildAgentsResourceWithStreamingResponse(self) + + def update( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return self._put( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentAddResponse: + """ + To add an agent route to an agent, send a POST request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. 
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_parent_agent_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+            )
+        if not path_child_agent_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+            )
+        return self._post(
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            body=maybe_transform(
+                {
+                    "body_child_agent_uuid": body_child_agent_uuid,
+                    "if_case": if_case,
+                    "body_parent_agent_uuid": body_parent_agent_uuid,
+                    "route_name": route_name,
+                },
+                child_agent_add_params.ChildAgentAddParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChildAgentAddResponse,
+        )
+
+    def view(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChildAgentViewResponse:
+        """
+        To view agent routes for an agent, send a GET request to
+        `/v2/gen-ai/agents/{uuid}/child_agents`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._get(
+            f"/v2/gen-ai/agents/{uuid}/child_agents",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChildAgentViewResponse,
+        )
+
+
+class AsyncChildAgentsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncChildAgentsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncChildAgentsResourceWithStreamingResponse(self)
+
+    async def update(
+        self,
+        path_child_agent_uuid: str,
+        *,
+        path_parent_agent_uuid: str,
+        body_child_agent_uuid: str | NotGiven = NOT_GIVEN,
+        if_case: str | NotGiven = NOT_GIVEN,
+        body_parent_agent_uuid: str | NotGiven = NOT_GIVEN,
+        route_name: str | NotGiven = NOT_GIVEN,
+        uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return await self._put( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=await async_maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + async def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + async def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChildAgentAddResponse:
+        """
+        To add an agent route to an agent, send a POST request to
+        `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
+
+        Args:
+          body_parent_agent_uuid: A unique identifier for the parent agent.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_parent_agent_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}"
+            )
+        if not path_child_agent_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
+            )
+        return await self._post(
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "body_child_agent_uuid": body_child_agent_uuid,
+                    "if_case": if_case,
+                    "body_parent_agent_uuid": body_parent_agent_uuid,
+                    "route_name": route_name,
+                },
+                child_agent_add_params.ChildAgentAddParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChildAgentAddResponse,
+        )
+
+    async def view(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChildAgentViewResponse:
+        """
+        To view agent routes for an agent, send a GET request to
+        `/v2/gen-ai/agents/{uuid}/child_agents`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}/child_agents", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentViewResponse, + ) + + +class ChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_raw_response_wrapper( + child_agents.update, + ) + self.delete = to_raw_response_wrapper( + child_agents.delete, + ) + self.add = to_raw_response_wrapper( + child_agents.add, + ) + self.view = to_raw_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_raw_response_wrapper( + child_agents.update, + ) + self.delete = async_to_raw_response_wrapper( + child_agents.delete, + ) + self.add = async_to_raw_response_wrapper( + child_agents.add, + ) + self.view = async_to_raw_response_wrapper( + child_agents.view, + ) + + +class ChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_streamed_response_wrapper( + child_agents.update, + ) + self.delete = to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = to_streamed_response_wrapper( + child_agents.add, + ) + self.view = to_streamed_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_streamed_response_wrapper( + child_agents.update, + ) + self.delete = async_to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = async_to_streamed_response_wrapper( + child_agents.add, + ) + self.view = async_to_streamed_response_wrapper( + child_agents.view, + ) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 3d65228a..97b086e0 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -15,6 +15,7 @@ ) from ..._base_client import make_request_options from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] @@ -73,6 +74,84 @@ def attach( cast_to=APILinkKnowledgeBaseOutput, ) + def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + class AsyncKnowledgeBasesResource(AsyncAPIResource): @cached_property @@ -128,6 +207,84 @@ async def attach( cast_to=APILinkKnowledgeBaseOutput, ) + async def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + async def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + class KnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -136,6 +293,12 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.attach = to_raw_response_wrapper( knowledge_bases.attach, ) + self.attach_single = to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_raw_response_wrapper( + knowledge_bases.detach, + ) class AsyncKnowledgeBasesResourceWithRawResponse: @@ -145,6 +308,12 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.attach = async_to_raw_response_wrapper( knowledge_bases.attach, ) + self.attach_single = async_to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_raw_response_wrapper( + knowledge_bases.detach, + ) class KnowledgeBasesResourceWithStreamingResponse: @@ -154,6 +323,12 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.attach = to_streamed_response_wrapper( knowledge_bases.attach, ) + self.attach_single = to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_streamed_response_wrapper( + knowledge_bases.detach, + ) class AsyncKnowledgeBasesResourceWithStreamingResponse: @@ -163,3 +338,9 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.attach = async_to_streamed_response_wrapper( knowledge_bases.attach, ) + self.attach_single = async_to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_streamed_response_wrapper( + knowledge_bases.detach, + ) diff --git a/src/gradientai/resources/api_keys/__init__.py b/src/gradientai/resources/api_keys/__init__.py new file mode 100644 index 00000000..ed14565c --- /dev/null +++ b/src/gradientai/resources/api_keys/__init__.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py new file mode 100644 index 00000000..aecccfc3 --- /dev/null +++ b/src/gradientai/resources/api_keys/api_keys.py @@ -0,0 +1,275 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
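
# A minimal sketch of the attach/detach flow defined in knowledge_bases.py
# above, assuming the client class is `GradientAI`; both UUIDs are hypothetical
# placeholders.
from gradientai import GradientAI

client = GradientAI()

# Attach a single knowledge base to an agent, then detach it again.
client.agents.knowledge_bases.attach_single(
    knowledge_base_uuid="kb-uuid",
    agent_uuid="agent-uuid",
)
client.agents.knowledge_bases.detach(
    knowledge_base_uuid="kb-uuid",
    agent_uuid="agent-uuid",
)
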
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from . import api_keys_ as api_keys +from ...types import api_key_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.api_key_list_response import APIKeyListResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def api_keys(self) -> api_keys.APIKeysResource: + return api_keys.APIKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+
+          - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+          - MODEL_USECASE_REASONING: The model may be used for reasoning
+          - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/gen-ai/models",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    api_key_list_params.APIKeyListParams,
+                ),
+            ),
+            cast_to=APIKeyListResponse,
+        )
+
+
+class AsyncAPIKeysResource(AsyncAPIResource):
+    @cached_property
+    def api_keys(self) -> api_keys.AsyncAPIKeysResource:
+        return api_keys.AsyncAPIKeysResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncAPIKeysResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncAPIKeysResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        public_only: bool | NotGiven = NOT_GIVEN,
+        usecases: List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> APIKeyListResponse:
+        """
+        To list all models, send a GET request to `/v2/gen-ai/models`.
+
+        Args:
+          page: page number.
+
+          per_page: items per page.
+
+          public_only: only include models that are publicly available.
+
+          usecases: include only models defined for the listed usecases.
+
+          - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+          - MODEL_USECASE_REASONING: The model may be used for reasoning
+          - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/gen-ai/models",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    api_key_list_params.APIKeyListParams,
+                ),
+            ),
+            cast_to=APIKeyListResponse,
+        )
+
+
+class APIKeysResourceWithRawResponse:
+    def __init__(self, api_keys: APIKeysResource) -> None:
+        self._api_keys = api_keys
+
+        self.list = to_raw_response_wrapper(
+            api_keys.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse:
+        return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys)
+
+
+class AsyncAPIKeysResourceWithRawResponse:
+    def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+        self._api_keys = api_keys
+
+        self.list = async_to_raw_response_wrapper(
+            api_keys.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse:
+        return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys)
+
+
+class APIKeysResourceWithStreamingResponse:
+    def __init__(self, api_keys: APIKeysResource) -> None:
+        self._api_keys = api_keys
+
+        self.list = to_streamed_response_wrapper(
+            api_keys.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse:
+        return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys)
+
+
+class AsyncAPIKeysResourceWithStreamingResponse:
+    def __init__(self, api_keys: AsyncAPIKeysResource) -> None:
+        self._api_keys = api_keys
+
+        self.list = async_to_streamed_response_wrapper(
+            api_keys.list,
+        )
+
+    @cached_property
+    def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse:
+        return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys)
diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py
new file mode 100644
index 00000000..969bcfb9
--- /dev/null
+++ b/src/gradientai/resources/api_keys/api_keys_.py
@@ -0,0 +1,529 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
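
# A minimal sketch of the model listing defined above, assuming the resource is
# exposed as `client.api_keys` on a `GradientAI` client; note that despite the
# resource name, `list` here pages through models on `/v2/gen-ai/models`.
from gradientai import GradientAI

client = GradientAI()

models = client.api_keys.list(
    page=1,
    per_page=50,
    public_only=True,
    usecases=["MODEL_USECASE_AGENT"],  # only models usable by agents
)
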
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.api_keys.api_key_list_response import APIKeyListResponse +from ...types.api_keys.api_key_create_response import APIKeyCreateResponse +from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse +from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse +from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/models/api_keys", + body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/models/api_keys", + body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + async def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.update = to_streamed_response_wrapper( 
+ api_keys.update, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_streamed_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_streamed_response_wrapper( + api_keys.update_regenerate, + ) diff --git a/src/gradientai/resources/auth/__init__.py b/src/gradientai/resources/auth/__init__.py new file mode 100644 index 00000000..7c844a98 --- /dev/null +++ b/src/gradientai/resources/auth/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/auth/agents/__init__.py b/src/gradientai/resources/auth/agents/__init__.py new file mode 100644 index 00000000..2972198f --- /dev/null +++ b/src/gradientai/resources/auth/agents/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "TokenResource", + "AsyncTokenResource", + "TokenResourceWithRawResponse", + "AsyncTokenResourceWithRawResponse", + "TokenResourceWithStreamingResponse", + "AsyncTokenResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py new file mode 100644 index 00000000..52426560 --- /dev/null +++ b/src/gradientai/resources/auth/agents/agents.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
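
# A minimal sketch of the model API key lifecycle defined in api_keys_.py
# above; the nested `client.api_keys.api_keys` access mirrors the wiring shown
# in this patch, while the client class name, key name, and key UUID are
# hypothetical placeholders.
from gradientai import GradientAI

client = GradientAI()
keys = client.api_keys.api_keys

created = keys.create(name="ci-inference-key")  # hypothetical key name
# Rotate an existing key by UUID; this issues a PUT to .../{api_key_uuid}/regenerate.
rotated = keys.update_regenerate("api-key-uuid")  # hypothetical key UUID
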
+ +from __future__ import annotations + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AgentsResource", "AsyncAgentsResource"] + + +class AgentsResource(SyncAPIResource): + @cached_property + def token(self) -> TokenResource: + return TokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AgentsResourceWithStreamingResponse(self) + + +class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def token(self) -> AsyncTokenResource: + return AsyncTokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAgentsResourceWithStreamingResponse(self) + + +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithRawResponse: + return TokenResourceWithRawResponse(self._agents.token) + + +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithRawResponse: + return AsyncTokenResourceWithRawResponse(self._agents.token) + + +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithStreamingResponse: + return TokenResourceWithStreamingResponse(self._agents.token) + + +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithStreamingResponse: + return AsyncTokenResourceWithStreamingResponse(self._agents.token) diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py new file mode 100644 index 00000000..26de7c06 --- /dev/null +++ b/src/gradientai/resources/auth/agents/token.py @@ -0,0 +1,173 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.auth.agents import token_create_params +from ....types.auth.agents.token_create_response import TokenCreateResponse + +__all__ = ["TokenResource", "AsyncTokenResource"] + + +class TokenResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> TokenResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return TokenResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> TokenResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return TokenResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TokenCreateResponse: + """ + To issue an agent token, send a POST request to + `/v2/gen-ai/auth/agents/{agent_uuid}/token`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token", + body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=TokenCreateResponse, + ) + + +class AsyncTokenResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncTokenResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncTokenResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TokenCreateResponse: + """ + To issue an agent token, send a POST request to + `/v2/gen-ai/auth/agents/{agent_uuid}/token`. 
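+
+        For illustration only, a hypothetical call (the `client` instance and
+        the UUID below are placeholders, not part of this patch):
+
+            token = await client.auth.agents.token.create(
+                "example-agent-uuid",
+            )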
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token", + body=await async_maybe_transform( + {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=TokenCreateResponse, + ) + + +class TokenResourceWithRawResponse: + def __init__(self, token: TokenResource) -> None: + self._token = token + + self.create = to_raw_response_wrapper( + token.create, + ) + + +class AsyncTokenResourceWithRawResponse: + def __init__(self, token: AsyncTokenResource) -> None: + self._token = token + + self.create = async_to_raw_response_wrapper( + token.create, + ) + + +class TokenResourceWithStreamingResponse: + def __init__(self, token: TokenResource) -> None: + self._token = token + + self.create = to_streamed_response_wrapper( + token.create, + ) + + +class AsyncTokenResourceWithStreamingResponse: + def __init__(self, token: AsyncTokenResource) -> None: + self._token = token + + self.create = async_to_streamed_response_wrapper( + token.create, + ) diff --git a/src/gradientai/resources/auth/auth.py b/src/gradientai/resources/auth/auth.py new file mode 100644 index 00000000..7a502a2c --- /dev/null +++ b/src/gradientai/resources/auth/auth.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .agents.agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = ["AuthResource", "AsyncAuthResource"] + + +class AuthResource(SyncAPIResource): + @cached_property + def agents(self) -> AgentsResource: + return AgentsResource(self._client) + + @cached_property + def with_raw_response(self) -> AuthResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AuthResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AuthResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AuthResourceWithStreamingResponse(self) + + +class AsyncAuthResource(AsyncAPIResource): + @cached_property + def agents(self) -> AsyncAgentsResource: + return AsyncAgentsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
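+
+        A hypothetical sketch (the `client` instance and UUID are placeholders;
+        the chained attribute path follows the wrapper classes defined below):
+
+            response = await client.auth.with_raw_response.agents.token.create(
+                "example-agent-uuid",
+            )
+            print(response.headers)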
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAuthResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAuthResourceWithStreamingResponse(self) + + +class AuthResourceWithRawResponse: + def __init__(self, auth: AuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AgentsResourceWithRawResponse: + return AgentsResourceWithRawResponse(self._auth.agents) + + +class AsyncAuthResourceWithRawResponse: + def __init__(self, auth: AsyncAuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AsyncAgentsResourceWithRawResponse: + return AsyncAgentsResourceWithRawResponse(self._auth.agents) + + +class AuthResourceWithStreamingResponse: + def __init__(self, auth: AuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AgentsResourceWithStreamingResponse: + return AgentsResourceWithStreamingResponse(self._auth.agents) + + +class AsyncAuthResourceWithStreamingResponse: + def __init__(self, auth: AsyncAuthResource) -> None: + self._auth = auth + + @cached_property + def agents(self) -> AsyncAgentsResourceWithStreamingResponse: + return AsyncAgentsResourceWithStreamingResponse(self._auth.agents) diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index 21bde932..a1d2c575 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -21,6 +21,7 @@ ) from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse +from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam @@ -148,6 +149,45 @@ def list( cast_to=DataSourceListResponse, ) + def delete( + self, + data_source_uuid: str, + *, + knowledge_base_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceDeleteResponse: + """ + To delete a data source from a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. 
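+
+        For illustration, a hypothetical call (the `client` instance and both
+        UUIDs are placeholders, not part of this patch):
+
+            client.knowledge_bases.data_sources.delete(
+                "example-data-source-uuid",
+                knowledge_base_uuid="example-knowledge-base-uuid",
+            )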
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + if not data_source_uuid: + raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") + return self._delete( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceDeleteResponse, + ) + class AsyncDataSourcesResource(AsyncAPIResource): @cached_property @@ -270,6 +310,45 @@ async def list( cast_to=DataSourceListResponse, ) + async def delete( + self, + data_source_uuid: str, + *, + knowledge_base_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceDeleteResponse: + """ + To delete a data source from a knowledge base, send a DELETE request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + if not data_source_uuid: + raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") + return await self._delete( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceDeleteResponse, + ) + class DataSourcesResourceWithRawResponse: def __init__(self, data_sources: DataSourcesResource) -> None: @@ -281,6 +360,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.list = to_raw_response_wrapper( data_sources.list, ) + self.delete = to_raw_response_wrapper( + data_sources.delete, + ) class AsyncDataSourcesResourceWithRawResponse: @@ -293,6 +375,9 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.list = async_to_raw_response_wrapper( data_sources.list, ) + self.delete = async_to_raw_response_wrapper( + data_sources.delete, + ) class DataSourcesResourceWithStreamingResponse: @@ -305,6 +390,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.list = to_streamed_response_wrapper( data_sources.list, ) + self.delete = to_streamed_response_wrapper( + data_sources.delete, + ) class AsyncDataSourcesResourceWithStreamingResponse: @@ -317,3 +405,6 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.list = async_to_streamed_response_wrapper( data_sources.list, ) + 
self.delete = async_to_streamed_response_wrapper(
+            data_sources.delete,
+        )
diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
index c49e23c4..7d4f38e3 100644
--- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py
+++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
@@ -6,7 +6,7 @@
 
 import httpx
 
-from ...types import knowledge_base_list_params, knowledge_base_create_params
+from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
@@ -28,6 +28,9 @@
 from ..._base_client import make_request_options
 from ...types.knowledge_base_list_response import KnowledgeBaseListResponse
 from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse
+from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse
+from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse
+from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse
 
 __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"]
 
@@ -126,6 +129,97 @@ def create(
             cast_to=KnowledgeBaseCreateResponse,
         )
 
+    def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._get(
+            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        body_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: the id of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return self._put(
+            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
+            body=maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
     def list(
         self,
         *,
@@ -172,6 +266,40 @@ def list(
             cast_to=KnowledgeBaseListResponse,
         )
 
+    def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseDeleteResponse:
+        """
+        To delete a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return self._delete(
+            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseDeleteResponse,
+        )
+
 
 class AsyncKnowledgeBasesResource(AsyncAPIResource):
     @cached_property
@@ -267,6 +395,97 @@ async def create(
             cast_to=KnowledgeBaseCreateResponse,
         )
 
+    async def retrieve(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseRetrieveResponse:
+        """
+        To retrieve information about an existing knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
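+
+        A hypothetical async usage sketch (the `client` instance and UUID are
+        placeholders, not part of this patch):
+
+            knowledge_base = await client.knowledge_bases.retrieve(
+                "example-knowledge-base-uuid",
+            )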
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not uuid:
+            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_uuid: str,
+        *,
+        database_id: str | NotGiven = NOT_GIVEN,
+        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        tags: List[str] | NotGiven = NOT_GIVEN,
+        body_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseUpdateResponse:
+        """
+        To update a knowledge base, send a PUT request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
+
+        Args:
+          database_id: the id of the DigitalOcean database this knowledge base will use, optional.
+
+          embedding_model_uuid: Identifier for the foundation model.
+
+          tags: Tags to organize your knowledge base.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_uuid:
+            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+        return await self._put(
+            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "database_id": database_id,
+                    "embedding_model_uuid": embedding_model_uuid,
+                    "name": name,
+                    "project_id": project_id,
+                    "tags": tags,
+                    "body_uuid": body_uuid,
+                },
+                knowledge_base_update_params.KnowledgeBaseUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseUpdateResponse,
+        )
+
     async def list(
         self,
         *,
@@ -313,6 +532,40 @@ async def list(
             cast_to=KnowledgeBaseListResponse,
         )
 
+    async def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> KnowledgeBaseDeleteResponse:
+        """
+        To delete a knowledge base, send a DELETE request to
+        `/v2/gen-ai/knowledge_bases/{uuid}`.
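+
+        A hypothetical async usage sketch (placeholder `client` and UUID, not
+        part of this patch):
+
+            await client.knowledge_bases.delete("example-knowledge-base-uuid")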
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/gen-ai/knowledge_bases/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDeleteResponse, + ) + class KnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -321,9 +574,18 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.create = to_raw_response_wrapper( knowledge_bases.create, ) + self.retrieve = to_raw_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = to_raw_response_wrapper( + knowledge_bases.update, + ) self.list = to_raw_response_wrapper( knowledge_bases.list, ) + self.delete = to_raw_response_wrapper( + knowledge_bases.delete, + ) @cached_property def data_sources(self) -> DataSourcesResourceWithRawResponse: @@ -337,9 +599,18 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.create = async_to_raw_response_wrapper( knowledge_bases.create, ) + self.retrieve = async_to_raw_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = async_to_raw_response_wrapper( + knowledge_bases.update, + ) self.list = async_to_raw_response_wrapper( knowledge_bases.list, ) + self.delete = async_to_raw_response_wrapper( + knowledge_bases.delete, + ) @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: @@ -353,9 +624,18 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.create = to_streamed_response_wrapper( knowledge_bases.create, ) + self.retrieve = to_streamed_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = to_streamed_response_wrapper( + knowledge_bases.update, + ) self.list = to_streamed_response_wrapper( knowledge_bases.list, ) + self.delete = to_streamed_response_wrapper( + knowledge_bases.delete, + ) @cached_property def data_sources(self) -> DataSourcesResourceWithStreamingResponse: @@ -369,9 +649,18 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.create = async_to_streamed_response_wrapper( knowledge_bases.create, ) + self.retrieve = async_to_streamed_response_wrapper( + knowledge_bases.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + knowledge_bases.update, + ) self.list = async_to_streamed_response_wrapper( knowledge_bases.list, ) + self.delete = async_to_streamed_response_wrapper( + knowledge_bases.delete, + ) @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: diff --git a/src/gradientai/resources/providers/__init__.py b/src/gradientai/resources/providers/__init__.py new file mode 100644 index 00000000..1731e057 --- /dev/null +++ b/src/gradientai/resources/providers/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) + +__all__ = [ + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/providers/anthropic/__init__.py b/src/gradientai/resources/providers/anthropic/__init__.py new file mode 100644 index 00000000..057a3a2f --- /dev/null +++ b/src/gradientai/resources/providers/anthropic/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/gradientai/resources/providers/anthropic/anthropic.py new file mode 100644 index 00000000..23a914e9 --- /dev/null +++ b/src/gradientai/resources/providers/anthropic/anthropic.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AnthropicResource", "AsyncAnthropicResource"] + + +class AnthropicResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AnthropicResourceWithStreamingResponse(self) + + +class AsyncAnthropicResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAnthropicResourceWithStreamingResponse(self) + + +class AnthropicResourceWithRawResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithRawResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._anthropic.keys) + + +class AnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py new file mode 100644 index 00000000..8fbb64db --- /dev/null +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -0,0 +1,662 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params +from ....types.providers.anthropic.key_list_response import KeyListResponse +from ....types.providers.anthropic.key_create_response import KeyCreateResponse +from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse +from ....types.providers.anthropic.key_update_response import KeyUpdateResponse +from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse +from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
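+
+        A hypothetical sketch (the `client` instance is a placeholder, and the
+        `providers.anthropic.keys` path is assumed from this patch's layout):
+
+            with client.providers.anthropic.keys.with_streaming_response.list() as response:
+                print(response.headers)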
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/anthropic/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
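+
+        For illustration, a hypothetical call (placeholder `client`, UUID, and
+        name; the `providers.anthropic.keys` path is assumed):
+
+            key = client.providers.anthropic.keys.update(
+                "example-api-key-uuid",
+                name="renamed-key",
+            )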
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/anthropic/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = to_raw_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_raw_response_wrapper( + keys.list_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = to_streamed_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_streamed_response_wrapper( + keys.list_agents, + ) diff --git a/src/gradientai/resources/providers/openai/__init__.py b/src/gradientai/resources/providers/openai/__init__.py new file mode 100644 index 00000000..66d8ca7a --- /dev/null +++ b/src/gradientai/resources/providers/openai/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py new file mode 100644 index 00000000..44ac8508 --- /dev/null +++ b/src/gradientai/resources/providers/openai/keys.py @@ -0,0 +1,658 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params +from ....types.providers.openai.key_list_response import KeyListResponse +from ....types.providers.openai.key_create_response import KeyCreateResponse +from ....types.providers.openai.key_delete_response import KeyDeleteResponse +from ....types.providers.openai.key_update_response import KeyUpdateResponse +from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse +from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/openai/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/openai/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_streamed_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_streamed_response_wrapper( + keys.retrieve_agents, + ) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py new file mode 100644 index 00000000..b02dc2e1 --- /dev/null +++ b/src/gradientai/resources/providers/openai/openai.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
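+#
+# Usage sketch for the keys sub-resource aggregated below (editorial,
+# illustrative only; assumes the client class is exported as `GradientAI`):
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     created = client.providers.openai.keys.create(api_key="sk-example", name="prod-key")
+#     keys = client.providers.openai.keys.list(page=1, per_page=20)
+#     agents = client.providers.openai.keys.retrieve_agents("key-uuid", per_page=10)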
+ +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] + + +class OpenAIResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> OpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return OpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return OpenAIResourceWithStreamingResponse(self) + + +class AsyncOpenAIResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncOpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncOpenAIResourceWithStreamingResponse(self) + + +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._openai.keys) + + +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py new file mode 100644 index 00000000..ef942f73 --- /dev/null +++ b/src/gradientai/resources/providers/providers.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .openai.openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic.anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = ["ProvidersResource", "AsyncProvidersResource"] + + +class ProvidersResource(SyncAPIResource): + @cached_property + def anthropic(self) -> AnthropicResource: + return AnthropicResource(self._client) + + @cached_property + def openai(self) -> OpenAIResource: + return OpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> ProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ProvidersResourceWithStreamingResponse(self) + + +class AsyncProvidersResource(AsyncAPIResource): + @cached_property + def anthropic(self) -> AsyncAnthropicResource: + return AsyncAnthropicResource(self._client) + + @cached_property + def openai(self) -> AsyncOpenAIResource: + return AsyncOpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncProvidersResourceWithStreamingResponse(self) + + +class ProvidersResourceWithRawResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithRawResponse: + return AnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithRawResponse: + return OpenAIResourceWithRawResponse(self._providers.openai) + + +class AsyncProvidersResourceWithRawResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: + return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithRawResponse: + return AsyncOpenAIResourceWithRawResponse(self._providers.openai) + + +class ProvidersResourceWithStreamingResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithStreamingResponse: + return AnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithStreamingResponse: + return OpenAIResourceWithStreamingResponse(self._providers.openai) + + +class AsyncProvidersResourceWithStreamingResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: + return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: + return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py new file mode 100644 index 00000000..ad760c24 --- /dev/null +++ b/src/gradientai/resources/regions.py @@ -0,0 +1,191 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
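+#
+# Usage sketch (editorial, illustrative only; assumes the client exposes this
+# resource as `client.regions`):
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     # only datacenters that serve inference, excluding batch-only capacity
+#     regions = client.regions.list(serves_inference=True, serves_batch=False)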
+ +from __future__ import annotations + +import httpx + +from ..types import region_list_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.region_list_response import RegionListResponse + +__all__ = ["RegionsResource", "AsyncRegionsResource"] + + +class RegionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return RegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return RegionsResourceWithStreamingResponse(self) + + def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class AsyncRegionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncRegionsResourceWithStreamingResponse(self) + + async def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class RegionsResourceWithRawResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_raw_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithRawResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_raw_response_wrapper( + regions.list, + ) + + +class RegionsResourceWithStreamingResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_streamed_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithStreamingResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_streamed_response_wrapper( + regions.list, + ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index cb52748c..ee516f83 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -10,29 +10,43 @@ from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams from .agent_list_response import AgentListResponse as AgentListResponse +from .agent_update_params import AgentUpdateParams as AgentUpdateParams +from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .region_list_response import RegionListResponse as RegionListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse +from 
.agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse +from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse +from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse +from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py new file mode 100644 index 00000000..eb1d440d --- /dev/null +++ b/src/gradientai/types/agent_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
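+#
+# Editorial note: `agent` below is typed with the string forward reference
+# "APIAgent", and `from .api_agent import APIAgent` sits at the bottom of the
+# module. `APIAgent` can itself embed agent responses like this one, so the
+# deferred import breaks the circular dependency while still letting the model
+# resolve the reference once both modules are loaded.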
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentDeleteResponse"] + + +class AgentDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py new file mode 100644 index 00000000..2eed88af --- /dev/null +++ b/src/gradientai/types/agent_retrieve_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentRetrieveResponse"] + + +class AgentRetrieveResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py new file mode 100644 index 00000000..85f9a9c2 --- /dev/null +++ b/src/gradientai/types/agent_update_params.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo +from .api_retrieval_method import APIRetrievalMethod + +__all__ = ["AgentUpdateParams"] + + +class AgentUpdateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: int + + max_tokens: int + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + """ + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + provide_citations: bool + + retrieval_method: APIRetrievalMethod + + tags: List[str] + + temperature: float + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. + """ + + top_p: float + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py new file mode 100644 index 00000000..2948aa1c --- /dev/null +++ b/src/gradientai/types/agent_update_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
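+#
+# Usage sketch for the update parameters defined above (editorial, illustrative
+# only; assumes the agents resource is exposed as `client.agents`):
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     updated = client.agents.update(
+#         "agent-uuid",
+#         instruction="Answer billing questions politely.",
+#         temperature=0.2,  # low temperature: predictable, conservative replies
+#         top_p=0.9,
+#         max_tokens=256,  # must fall between 1 and 512
+#     )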
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentUpdateResponse"] + + +class AgentUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py new file mode 100644 index 00000000..a0cdc0b9 --- /dev/null +++ b/src/gradientai/types/agent_update_status_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = ["AgentUpdateStatusParams"] + + +class AgentUpdateStatusParams(TypedDict, total=False): + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + + visibility: APIDeploymentVisibility diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py new file mode 100644 index 00000000..b200f99d --- /dev/null +++ b/src/gradientai/types/agent_update_status_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentUpdateStatusResponse"] + + +class AgentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 2a7a830e..aae0ee6b 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -11,14 +11,21 @@ from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams from .function_create_params import FunctionCreateParams as FunctionCreateParams from .function_update_params import FunctionUpdateParams as FunctionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse +from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams +from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse +from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse +from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput 
+from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/gradientai/types/agents/child_agent_add_params.py new file mode 100644 index 00000000..001baa6f --- /dev/null +++ b/src/gradientai/types/agents/child_agent_add_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["ChildAgentAddParams"] + + +class ChildAgentAddParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/gradientai/types/agents/child_agent_add_response.py new file mode 100644 index 00000000..baccec10 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_add_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentAddResponse"] + + +class ChildAgentAddResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None + """A unique identifier for the parent agent.""" diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/gradientai/types/agents/child_agent_delete_response.py new file mode 100644 index 00000000..b50fb024 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentDeleteResponse"] + + +class ChildAgentDeleteResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/gradientai/types/agents/child_agent_update_params.py new file mode 100644 index 00000000..2f009a52 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_update_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["ChildAgentUpdateParams"] + + +class ChildAgentUpdateParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str + + uuid: str diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/gradientai/types/agents/child_agent_update_response.py new file mode 100644 index 00000000..48a13c72 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_update_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChildAgentUpdateResponse"]
+
+
+class ChildAgentUpdateResponse(BaseModel):
+    child_agent_uuid: Optional[str] = None
+
+    parent_agent_uuid: Optional[str] = None
+    """A unique identifier for the parent agent."""
+
+    rollback: Optional[bool] = None
+
+    uuid: Optional[str] = None
diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/gradientai/types/agents/child_agent_view_response.py
new file mode 100644
index 00000000..ffbaef12
--- /dev/null
+++ b/src/gradientai/types/agents/child_agent_view_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChildAgentViewResponse"]
+
+
+class ChildAgentViewResponse(BaseModel):
+    children: Optional[List["APIAgent"]] = None
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py
new file mode 100644
index 00000000..76bb4236
--- /dev/null
+++ b/src/gradientai/types/agents/knowledge_base_detach_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["KnowledgeBaseDetachResponse"]
+
+
+class KnowledgeBaseDetachResponse(BaseModel):
+    agent: Optional["APIAgent"] = None
+
+
+from ..api_agent import APIAgent
diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/api_key_list_params.py
new file mode 100644
index 00000000..a1ab60dc
--- /dev/null
+++ b/src/gradientai/types/api_key_list_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["APIKeyListParams"]
+
+
+class APIKeyListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
+
+    public_only: bool
+    """only include models that are publicly available."""
+
+    usecases: List[
+        Literal[
+            "MODEL_USECASE_UNKNOWN",
+            "MODEL_USECASE_AGENT",
+            "MODEL_USECASE_FINETUNED",
+            "MODEL_USECASE_KNOWLEDGEBASE",
+            "MODEL_USECASE_GUARDRAIL",
+            "MODEL_USECASE_REASONING",
+            "MODEL_USECASE_SERVERLESS",
+        ]
+    ]
+    """include only models defined for the listed usecases.
+
+    - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+    - MODEL_USECASE_AGENT: The model may be used in an agent
+    - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+    - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+      (embedding models)
+    - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+    - MODEL_USECASE_REASONING: The model may be used for reasoning
+    - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+    """
diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/api_key_list_response.py
new file mode 100644
index 00000000..360de7a4
--- /dev/null
+++ b/src/gradientai/types/api_key_list_response.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
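+#
+# Sketch of building the list params defined above (editorial, illustrative only):
+#
+#     from gradientai.types import APIKeyListParams
+#
+#     params: APIKeyListParams = {
+#         "page": 1,
+#         "per_page": 50,
+#         "public_only": True,
+#         # restrict results to models usable by agents or for fine-tuning
+#         "usecases": ["MODEL_USECASE_AGENT", "MODEL_USECASE_FINETUNED"],
+#     }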
+ +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_model_version import APIModelVersion + +__all__ = ["APIKeyListResponse", "Model"] + + +class Model(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None + + +class APIKeyListResponse(BaseModel): + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + models: Optional[List[Model]] = None diff --git a/src/gradientai/types/api_keys/__init__.py b/src/gradientai/types/api_keys/__init__.py index f8ee8b14..c3cbcd6d 100644 --- a/src/gradientai/types/api_keys/__init__.py +++ b/src/gradientai/types/api_keys/__init__.py @@ -1,3 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations + +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_keys/api_key_create_params.py b/src/gradientai/types/api_keys/api_key_create_params.py new file mode 100644 index 00000000..16cc23c9 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_create_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + name: str diff --git a/src/gradientai/types/api_keys/api_key_create_response.py b/src/gradientai/types/api_keys/api_key_create_response.py new file mode 100644 index 00000000..654e9f1e --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_delete_response.py b/src/gradientai/types/api_keys/api_key_delete_response.py new file mode 100644 index 00000000..4d81d047 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
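+#
+# Usage sketch for key creation (editorial, illustrative only; assumes the
+# resource is exposed as `client.api_keys` and that `secret_key` is returned
+# on creation, as is typical for API keys):
+#
+#     created = client.api_keys.create(name="ci-pipeline")
+#     if created.api_key_info is not None:
+#         print(created.api_key_info.uuid)
+#         print(created.api_key_info.secret_key)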
+ +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_list_params.py b/src/gradientai/types/api_keys/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/api_keys/api_key_list_response.py b/src/gradientai/types/api_keys/api_key_list_response.py new file mode 100644 index 00000000..535e2f96 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_keys/api_key_update_params.py b/src/gradientai/types/api_keys/api_key_update_params.py new file mode 100644 index 00000000..23c1c0b9 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py b/src/gradientai/types/api_keys/api_key_update_regenerate_response.py new file mode 100644 index 00000000..44a316dc --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyUpdateRegenerateResponse"] + + +class APIKeyUpdateRegenerateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_update_response.py b/src/gradientai/types/api_keys/api_key_update_response.py new file mode 100644 index 00000000..3671addf --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
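+#
+# Editorial note: in `APIKeyUpdateParams` above, `body_api_key_uuid` carries
+# `PropertyInfo(alias="api_key_uuid")`. The `body_` prefix exists only on the
+# Python side, to keep the request-body field distinct from the path parameter
+# of the same wire name; the transform step serializes it back to
+# `api_key_uuid`. A sketch:
+#
+#     params = {"body_api_key_uuid": "uuid-123", "name": "renamed-key"}
+#     # request body after transform: {"api_key_uuid": "uuid-123", "name": "renamed-key"}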
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_model_api_key_info import APIModelAPIKeyInfo
+
+__all__ = ["APIKeyUpdateResponse"]
+
+
+class APIKeyUpdateResponse(BaseModel):
+    api_key_info: Optional[APIModelAPIKeyInfo] = None
diff --git a/src/gradientai/types/api_keys/api_model_api_key_info.py b/src/gradientai/types/api_keys/api_model_api_key_info.py
new file mode 100644
index 00000000..bf354a47
--- /dev/null
+++ b/src/gradientai/types/api_keys/api_model_api_key_info.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["APIModelAPIKeyInfo"]
+
+
+class APIModelAPIKeyInfo(BaseModel):
+    created_at: Optional[datetime] = None
+
+    created_by: Optional[str] = None
+
+    deleted_at: Optional[datetime] = None
+
+    name: Optional[str] = None
+
+    secret_key: Optional[str] = None
+
+    uuid: Optional[str] = None
diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py
index f8ee8b14..9fae55b6 100644
--- a/src/gradientai/types/auth/agents/__init__.py
+++ b/src/gradientai/types/auth/agents/__init__.py
@@ -1,3 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
+
+from .token_create_params import TokenCreateParams as TokenCreateParams
+from .token_create_response import TokenCreateResponse as TokenCreateResponse
diff --git a/src/gradientai/types/auth/agents/token_create_params.py b/src/gradientai/types/auth/agents/token_create_params.py
new file mode 100644
index 00000000..0df640f9
--- /dev/null
+++ b/src/gradientai/types/auth/agents/token_create_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["TokenCreateParams"]
+
+
+class TokenCreateParams(TypedDict, total=False):
+    body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")]
diff --git a/src/gradientai/types/auth/agents/token_create_response.py b/src/gradientai/types/auth/agents/token_create_response.py
new file mode 100644
index 00000000..e58b7399
--- /dev/null
+++ b/src/gradientai/types/auth/agents/token_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["TokenCreateResponse"]
+
+
+class TokenCreateResponse(BaseModel):
+    access_token: Optional[str] = None
+
+    refresh_token: Optional[str] = None
diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py
new file mode 100644
index 00000000..6401e25a
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_delete_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
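The `TokenCreateResponse` pair above is what an agent token flow consumes. A minimal sketch of guarding its fields, assuming the package imports as `gradientai` and that these response models validate like ordinary pydantic models; since every field is Optional, callers should treat a missing token as an error at the call site:

from gradientai.types.auth.agents import TokenCreateResponse

resp = TokenCreateResponse(access_token="at-123", refresh_token="rt-456")

if resp.access_token is None:
    raise RuntimeError("token endpoint returned no access_token")
print(resp.access_token)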
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["KnowledgeBaseDeleteResponse"]
+
+
+class KnowledgeBaseDeleteResponse(BaseModel):
+    uuid: Optional[str] = None
diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py
new file mode 100644
index 00000000..5a3b5f2c
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_retrieve_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseRetrieveResponse"]
+
+
+class KnowledgeBaseRetrieveResponse(BaseModel):
+    database_status: Optional[
+        Literal[
+            "CREATING",
+            "ONLINE",
+            "POWEROFF",
+            "REBUILDING",
+            "REBALANCING",
+            "DECOMMISSIONED",
+            "FORKING",
+            "MIGRATING",
+            "RESIZING",
+            "RESTORING",
+            "POWERING_ON",
+            "UNHEALTHY",
+        ]
+    ] = None
+
+    knowledge_base: Optional[APIKnowledgeBase] = None
diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py
new file mode 100644
index 00000000..297c79de
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_update_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Annotated, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["KnowledgeBaseUpdateParams"]
+
+
+class KnowledgeBaseUpdateParams(TypedDict, total=False):
+    database_id: str
+    """The ID of the DigitalOcean database this knowledge base will use; optional."""
+
+    embedding_model_uuid: str
+    """Identifier for the foundation model."""
+
+    name: str
+
+    project_id: str
+
+    tags: List[str]
+    """Tags to organize your knowledge base."""
+
+    body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py
new file mode 100644
index 00000000..f3ba2c32
--- /dev/null
+++ b/src/gradientai/types/knowledge_base_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
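Because `KnowledgeBaseUpdateParams` is declared with `total=False`, an update request may carry any subset of its fields, and only the keys actually passed are serialized. A minimal sketch of building such a partial update; the dict mirrors the keyword arguments an update call would receive:

from gradientai.types.knowledge_base_update_params import KnowledgeBaseUpdateParams

# Only the fields being changed are present; everything else is omitted.
params: KnowledgeBaseUpdateParams = {
    "name": "support-docs",
    "tags": ["support", "v2"],
}
print(params)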
+
+from typing import Optional
+
+from .._models import BaseModel
+from .api_knowledge_base import APIKnowledgeBase
+
+__all__ = ["KnowledgeBaseUpdateResponse"]
+
+
+class KnowledgeBaseUpdateResponse(BaseModel):
+    knowledge_base: Optional[APIKnowledgeBase] = None
diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py
index e716e1f6..f5f31034 100644
--- a/src/gradientai/types/knowledge_bases/__init__.py
+++ b/src/gradientai/types/knowledge_bases/__init__.py
@@ -9,6 +9,7 @@
 from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource
 from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource
 from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse
+from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse
 from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource
 from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam
diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py
new file mode 100644
index 00000000..53954d7f
--- /dev/null
+++ b/src/gradientai/types/knowledge_bases/data_source_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["DataSourceDeleteResponse"]
+
+
+class DataSourceDeleteResponse(BaseModel):
+    data_source_uuid: Optional[str] = None
+
+    knowledge_base_uuid: Optional[str] = None
diff --git a/src/gradientai/types/providers/anthropic/__init__.py b/src/gradientai/types/providers/anthropic/__init__.py
index f8ee8b14..eb47e709 100644
--- a/src/gradientai/types/providers/anthropic/__init__.py
+++ b/src/gradientai/types/providers/anthropic/__init__.py
@@ -1,3 +1,14 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/gradientai/types/providers/anthropic/key_create_params.py b/src/gradientai/types/providers/anthropic/key_create_params.py
new file mode 100644
index 00000000..389f167c
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_create_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+    api_key: str
+
+    name: str
diff --git a/src/gradientai/types/providers/anthropic/key_create_response.py b/src/gradientai/types/providers/anthropic/key_create_response.py
new file mode 100644
index 00000000..a032810c
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+    api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/anthropic/key_delete_response.py b/src/gradientai/types/providers/anthropic/key_delete_response.py
new file mode 100644
index 00000000..2afe2dda
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_delete_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+    api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_params.py b/src/gradientai/types/providers/anthropic/key_list_agents_params.py
new file mode 100644
index 00000000..ebbc3b7e
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
new file mode 100644
index 00000000..ba6ca946
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+    agents: Optional[List["APIAgent"]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
+
+
+from ...api_agent import APIAgent
diff --git a/src/gradientai/types/providers/anthropic/key_list_params.py b/src/gradientai/types/providers/anthropic/key_list_params.py
new file mode 100644
index 00000000..a11458ad
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
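The bottom-of-module `from ...api_agent import APIAgent` in `key_list_agents_response.py` above is deliberate: `api_agent` can itself reference agent-shaped responses, so a top-of-file import would create a cycle. The field is instead annotated with the string form `"APIAgent"` and the import is deferred until after the class body, at which point the forward reference can be resolved. A minimal single-file sketch of the same technique, using plain pydantic v2 as a stand-in for the SDK's `BaseModel`:

from typing import List, Optional

from pydantic import BaseModel


class KeyListAgentsResponse(BaseModel):
    # Forward reference: APIAgent is not defined yet at class-creation time.
    agents: Optional[List["APIAgent"]] = None


class APIAgent(BaseModel):  # stands in for the deferred `from ...api_agent import APIAgent`
    uuid: Optional[str] = None


# Resolve the "APIAgent" string now that the name exists in this namespace.
KeyListAgentsResponse.model_rebuild()
print(KeyListAgentsResponse(agents=[APIAgent(uuid="agent-1")]))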
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/gradientai/types/providers/anthropic/key_list_response.py
new file mode 100644
index 00000000..d0b84e96
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+    api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/src/gradientai/types/providers/anthropic/key_retrieve_response.py b/src/gradientai/types/providers/anthropic/key_retrieve_response.py
new file mode 100644
index 00000000..b8361fc2
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+    api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/anthropic/key_update_params.py b/src/gradientai/types/providers/anthropic/key_update_params.py
new file mode 100644
index 00000000..c07d7f66
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+    api_key: str
+
+    body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+
+    name: str
diff --git a/src/gradientai/types/providers/anthropic/key_update_response.py b/src/gradientai/types/providers/anthropic/key_update_response.py
new file mode 100644
index 00000000..b04277a6
--- /dev/null
+++ b/src/gradientai/types/providers/anthropic/key_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+    api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/gradientai/types/providers/openai/__init__.py
index f8ee8b14..70abf332 100644
--- a/src/gradientai/types/providers/openai/__init__.py
+++ b/src/gradientai/types/providers/openai/__init__.py
@@ -1,3 +1,14 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams
+from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse
diff --git a/src/gradientai/types/providers/openai/key_create_params.py b/src/gradientai/types/providers/openai/key_create_params.py
new file mode 100644
index 00000000..389f167c
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_create_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+    api_key: str
+
+    name: str
diff --git a/src/gradientai/types/providers/openai/key_create_response.py b/src/gradientai/types/providers/openai/key_create_response.py
new file mode 100644
index 00000000..f3b4d36c
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+    api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/openai/key_delete_response.py b/src/gradientai/types/providers/openai/key_delete_response.py
new file mode 100644
index 00000000..0c8922bb
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_delete_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+    api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/openai/key_list_params.py b/src/gradientai/types/providers/openai/key_list_params.py
new file mode 100644
index 00000000..a11458ad
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py
new file mode 100644
index 00000000..c263cba3
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+    api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/gradientai/types/providers/openai/key_retrieve_agents_params.py
new file mode 100644
index 00000000..ec745d14
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_retrieve_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyRetrieveAgentsParams"]
+
+
+class KeyRetrieveAgentsParams(TypedDict, total=False):
+    page: int
+    """page number."""
+
+    per_page: int
+    """items per page."""
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
new file mode 100644
index 00000000..f42edea6
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+
+__all__ = ["KeyRetrieveAgentsResponse"]
+
+
+class KeyRetrieveAgentsResponse(BaseModel):
+    agents: Optional[List["APIAgent"]] = None
+
+    links: Optional[APILinks] = None
+
+    meta: Optional[APIMeta] = None
+
+
+from ...api_agent import APIAgent
diff --git a/src/gradientai/types/providers/openai/key_retrieve_response.py b/src/gradientai/types/providers/openai/key_retrieve_response.py
new file mode 100644
index 00000000..7015b6f7
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+    api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/gradientai/types/providers/openai/key_update_params.py b/src/gradientai/types/providers/openai/key_update_params.py
new file mode 100644
index 00000000..c07d7f66
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+    api_key: str
+
+    body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+
+    name: str
diff --git a/src/gradientai/types/providers/openai/key_update_response.py b/src/gradientai/types/providers/openai/key_update_response.py
new file mode 100644
index 00000000..4889f994
--- /dev/null
+++ b/src/gradientai/types/providers/openai/key_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+    api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py
new file mode 100644
index 00000000..1db0ad50
--- /dev/null
+++ b/src/gradientai/types/region_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["RegionListParams"]
+
+
+class RegionListParams(TypedDict, total=False):
+    serves_batch: bool
+    """include datacenters that are capable of running batch jobs."""
+
+    serves_inference: bool
+    """include datacenters that serve inference."""
diff --git a/src/gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py
new file mode 100644
index 00000000..0f955b36
--- /dev/null
+++ b/src/gradientai/types/region_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["RegionListResponse", "Region"]
+
+
+class Region(BaseModel):
+    inference_url: Optional[str] = None
+
+    region: Optional[str] = None
+
+    serves_batch: Optional[bool] = None
+
+    serves_inference: Optional[bool] = None
+
+    stream_inference_url: Optional[str] = None
+
+
+class RegionListResponse(BaseModel):
+    regions: Optional[List[Region]] = None
diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py
new file mode 100644
index 00000000..14af3b93
--- /dev/null
+++ b/tests/api_resources/agents/test_child_agents.py
@@ -0,0 +1,485 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
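The generated tests that follow exercise three access styles on every resource method: the plain call, `.with_raw_response` (a wrapper exposing the underlying HTTP request/response plus `.parse()`), and `.with_streaming_response` (a context manager whose body must call `.parse()` before the connection closes). A minimal sketch of the calling pattern, assuming a client configured with credentials (how the key is supplied is not shown in this patch); the method mirrors the tests below:

from gradientai import GradientAI

client = GradientAI()  # assumes credentials are picked up from the environment

# Plain call: returns the parsed model directly.
child_agent = client.agents.child_agents.view("some-agent-uuid")

# Raw access: inspect the transport, then parse into the same model.
raw = client.agents.child_agents.with_raw_response.view("some-agent-uuid")
parsed = raw.parse()

# Streaming access: parse inside the context manager, before the body closes.
with client.agents.child_agents.with_streaming_response.view("some-agent-uuid") as response:
    parsed = response.parse()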
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types.agents import (
+    ChildAgentAddResponse,
+    ChildAgentViewResponse,
+    ChildAgentDeleteResponse,
+    ChildAgentUpdateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestChildAgents:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+            body_child_agent_uuid="child_agent_uuid",
+            if_case="if_case",
+            body_parent_agent_uuid="parent_agent_uuid",
+            route_name="route_name",
+            uuid="uuid",
+        )
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_update(self, client: GradientAI) -> None:
+        response = client.agents.child_agents.with_raw_response.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = response.parse()
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_update(self, client: GradientAI) -> None:
+        with client.agents.child_agents.with_streaming_response.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = response.parse()
+            assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_update(self, client: GradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+        ):
+            client.agents.child_agents.with_raw_response.update(
+                path_child_agent_uuid="child_agent_uuid",
+                path_parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+            client.agents.child_agents.with_raw_response.update(
+                path_child_agent_uuid="",
+                path_parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_delete(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_delete(self, client: GradientAI) -> None:
+        response = client.agents.child_agents.with_raw_response.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = response.parse()
+        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_delete(self, client: GradientAI) -> None:
+        with client.agents.child_agents.with_streaming_response.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = response.parse()
+            assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_delete(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
+            client.agents.child_agents.with_raw_response.delete(
+                child_agent_uuid="child_agent_uuid",
+                parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
+            client.agents.child_agents.with_raw_response.delete(
+                child_agent_uuid="",
+                parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_add(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_add_with_all_params(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+            body_child_agent_uuid="child_agent_uuid",
+            if_case="if_case",
+            body_parent_agent_uuid="parent_agent_uuid",
+            route_name="route_name",
+        )
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_add(self, client: GradientAI) -> None:
+        response = client.agents.child_agents.with_raw_response.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = response.parse()
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_add(self, client: GradientAI) -> None:
+        with client.agents.child_agents.with_streaming_response.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = response.parse()
+            assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_add(self, client: GradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+        ):
+            client.agents.child_agents.with_raw_response.add(
+                path_child_agent_uuid="child_agent_uuid",
+                path_parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+            client.agents.child_agents.with_raw_response.add(
+                path_child_agent_uuid="",
+                path_parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_view(self, client: GradientAI) -> None:
+        child_agent = client.agents.child_agents.view(
+            "uuid",
+        )
+        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_view(self, client: GradientAI) -> None:
+        response = client.agents.child_agents.with_raw_response.view(
+            "uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = response.parse()
+        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_view(self, client: GradientAI) -> None:
+        with client.agents.child_agents.with_streaming_response.view(
+            "uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = response.parse()
+            assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_view(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+            client.agents.child_agents.with_raw_response.view(
+                "",
+            )
+
+
+class TestAsyncChildAgents:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+            body_child_agent_uuid="child_agent_uuid",
+            if_case="if_case",
+            body_parent_agent_uuid="parent_agent_uuid",
+            route_name="route_name",
+            uuid="uuid",
+        )
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.child_agents.with_raw_response.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = await response.parse()
+        assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.child_agents.with_streaming_response.update(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = await response.parse()
+            assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+        ):
+            await async_client.agents.child_agents.with_raw_response.update(
+                path_child_agent_uuid="child_agent_uuid",
+                path_parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+            await async_client.agents.child_agents.with_raw_response.update(
+                path_child_agent_uuid="",
+                path_parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.child_agents.with_raw_response.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = await response.parse()
+        assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.child_agents.with_streaming_response.delete(
+            child_agent_uuid="child_agent_uuid",
+            parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = await response.parse()
+            assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
+            await async_client.agents.child_agents.with_raw_response.delete(
+                child_agent_uuid="child_agent_uuid",
+                parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
+            await async_client.agents.child_agents.with_raw_response.delete(
+                child_agent_uuid="",
+                parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_add(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+            body_child_agent_uuid="child_agent_uuid",
+            if_case="if_case",
+            body_parent_agent_uuid="parent_agent_uuid",
+            route_name="route_name",
+        )
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.child_agents.with_raw_response.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = await response.parse()
+        assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.child_agents.with_streaming_response.add(
+            path_child_agent_uuid="child_agent_uuid",
+            path_parent_agent_uuid="parent_agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = await response.parse()
+            assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(
+            ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
+        ):
+            await async_client.agents.child_agents.with_raw_response.add(
+                path_child_agent_uuid="child_agent_uuid",
+                path_parent_agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
+            await async_client.agents.child_agents.with_raw_response.add(
+                path_child_agent_uuid="",
+                path_parent_agent_uuid="parent_agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_view(self, async_client: AsyncGradientAI) -> None:
+        child_agent = await async_client.agents.child_agents.view(
+            "uuid",
+        )
+        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.child_agents.with_raw_response.view(
+            "uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        child_agent = await response.parse()
+        assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.child_agents.with_streaming_response.view(
+            "uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            child_agent = await response.parse()
+            assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_view(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+            await async_client.agents.child_agents.with_raw_response.view(
+                "",
+            )
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index c8b5541d..dff80a9a 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -9,7 +9,7 @@
 
 from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import APILinkKnowledgeBaseOutput
+from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -59,6 +59,110 @@ def test_path_params_attach(self, client: GradientAI) -> None:
                 "",
             )
 
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_attach_single(self, client: GradientAI) -> None:
+        knowledge_base = client.agents.knowledge_bases.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_attach_single(self, client: GradientAI) -> None:
+        response = client.agents.knowledge_bases.with_raw_response.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = response.parse()
+        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_attach_single(self, client: GradientAI) -> None:
+        with client.agents.knowledge_bases.with_streaming_response.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = response.parse()
+            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_attach_single(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+            client.agents.knowledge_bases.with_raw_response.attach_single(
+                knowledge_base_uuid="knowledge_base_uuid",
+                agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            client.agents.knowledge_bases.with_raw_response.attach_single(
+                knowledge_base_uuid="",
+                agent_uuid="agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_detach(self, client: GradientAI) -> None:
+        knowledge_base = client.agents.knowledge_bases.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_detach(self, client: GradientAI) -> None:
+        response = client.agents.knowledge_bases.with_raw_response.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = response.parse()
+        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_detach(self, client: GradientAI) -> None:
+        with client.agents.knowledge_bases.with_streaming_response.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = response.parse()
+            assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_detach(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+            client.agents.knowledge_bases.with_raw_response.detach(
+                knowledge_base_uuid="knowledge_base_uuid",
+                agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            client.agents.knowledge_bases.with_raw_response.detach(
+                knowledge_base_uuid="",
+                agent_uuid="agent_uuid",
+            )
+
 
 class TestAsyncKnowledgeBases:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -104,3 +208,107 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None:
         await async_client.agents.knowledge_bases.with_raw_response.attach(
             "",
         )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.agents.knowledge_bases.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = await response.parse()
+        assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.knowledge_bases.with_streaming_response.attach_single(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = await response.parse()
+            assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+            await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+                knowledge_base_uuid="knowledge_base_uuid",
+                agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            await async_client.agents.knowledge_bases.with_raw_response.attach_single(
+                knowledge_base_uuid="",
+                agent_uuid="agent_uuid",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_detach(self, async_client: AsyncGradientAI) -> None:
+        knowledge_base = await async_client.agents.knowledge_bases.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.agents.knowledge_bases.with_raw_response.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        knowledge_base = await response.parse()
+        assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.agents.knowledge_bases.with_streaming_response.detach(
+            knowledge_base_uuid="knowledge_base_uuid",
+            agent_uuid="agent_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            knowledge_base = await response.parse()
+            assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
+            await async_client.agents.knowledge_bases.with_raw_response.detach(
+                knowledge_base_uuid="knowledge_base_uuid",
+                agent_uuid="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+            await async_client.agents.knowledge_bases.with_raw_response.detach(
+                knowledge_base_uuid="",
+                agent_uuid="agent_uuid",
+            )
diff --git a/tests/api_resources/api_keys/__init__.py b/tests/api_resources/api_keys/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/api_keys/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/api_keys/test_api_keys_.py b/tests/api_resources/api_keys/test_api_keys_.py
new file mode 100644
index 00000000..01e8dcfa
--- /dev/null
+++ b/tests/api_resources/api_keys/test_api_keys_.py
@@ -0,0 +1,446 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types.api_keys import (
+    APIKeyListResponse,
+    APIKeyCreateResponse,
+    APIKeyDeleteResponse,
+    APIKeyUpdateResponse,
+    APIKeyUpdateRegenerateResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAPIKeys:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.create()
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.create(
+            name="name",
+        )
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_create(self, client: GradientAI) -> None:
+        response = client.api_keys.api_keys.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_create(self, client: GradientAI) -> None:
+        with client.api_keys.api_keys.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.update(
+            path_api_key_uuid="api_key_uuid",
+        )
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.update(
+            path_api_key_uuid="api_key_uuid",
+            body_api_key_uuid="api_key_uuid",
+            name="name",
+        )
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_update(self, client: GradientAI) -> None:
+        response = client.api_keys.api_keys.with_raw_response.update(
+            path_api_key_uuid="api_key_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_update(self, client: GradientAI) -> None:
+        with client.api_keys.api_keys.with_streaming_response.update(
+            path_api_key_uuid="api_key_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_update(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+            client.api_keys.api_keys.with_raw_response.update(
+                path_api_key_uuid="",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.list()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.list(
+            page=0,
+            per_page=0,
+        )
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.api_keys.api_keys.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.api_keys.api_keys.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_delete(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.delete(
+            "api_key_uuid",
+        )
+        assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_delete(self, client: GradientAI) -> None:
+        response = client.api_keys.api_keys.with_raw_response.delete(
+            "api_key_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_delete(self, client: GradientAI) -> None:
+        with client.api_keys.api_keys.with_streaming_response.delete(
+            "api_key_uuid",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_delete(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+            client.api_keys.api_keys.with_raw_response.delete(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_regenerate(self, client: GradientAI) -> None:
+        api_key = client.api_keys.api_keys.update_regenerate(
+            "api_key_uuid",
+        )
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_update_regenerate(self, client: GradientAI) -> None:
+        response = client.api_keys.api_keys.with_raw_response.update_regenerate(
+            "api_key_uuid",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"])
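The list tests above only assert types; in practice the `page`/`per_page` params drive manual pagination. A minimal sketch, assuming a configured client and using a short page as the stop condition (the response also carries `meta` and `links`, whose exact shapes are defined elsewhere in this patch):

from gradientai import GradientAI

client = GradientAI()  # assumes credentials are picked up from the environment

page = 1
while True:
    batch = client.api_keys.api_keys.list(page=page, per_page=50)
    infos = batch.api_key_infos or []
    for info in infos:
        print(info.uuid, info.name)
    if len(infos) < 50:  # a short page means there are no more results
        break
    page += 1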
test_streaming_response_update_regenerate(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.api_keys.with_raw_response.update_regenerate( + "", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.api_keys.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.list( + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received 
''"): + await async_client.api_keys.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.api_keys.with_raw_response.update_regenerate( + "", + ) diff --git a/tests/api_resources/auth/__init__.py b/tests/api_resources/auth/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/auth/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/__init__.py b/tests/api_resources/auth/agents/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/auth/agents/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py new file mode 100644 index 00000000..ef721cd0 --- /dev/null +++ b/tests/api_resources/auth/agents/test_token.py @@ -0,0 +1,124 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.auth.agents import TokenCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestToken: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + token = client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + token = client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) + + +class TestAsyncToken: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + token = await async_client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + token = await async_client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index cc90a9d7..ce9c390e 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -12,6 +12,7 @@ from gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, + DataSourceDeleteResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -142,6 +143,58 @@ def test_path_params_list(self, client: GradientAI) -> None: knowledge_base_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) + class TestAsyncDataSources: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -267,3 +320,55 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: await 
async_client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/providers/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/anthropic/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py new file mode 100644 index 00000000..fab973bf --- /dev/null +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.providers.anthropic import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> 
None: + with client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) 
-> None: + async with async_client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/openai/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py new file mode 100644 index 00000000..1bb270b1 --- /dev/null +++ b/tests/api_resources/providers/openai/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.providers.openai import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyRetrieveAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize 
+ def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with 
client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: 
AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents_with_all_params(self, 
async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index d88d4791..f39ac4d5 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -9,7 +9,14 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import AgentListResponse, AgentCreateResponse +from gradientai.types import ( + AgentListResponse, + AgentCreateResponse, + AgentDeleteResponse, + AgentUpdateResponse, + AgentRetrieveResponse, + AgentUpdateStatusResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -62,6 +69,113 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + agent = client.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + 
client.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + agent = client.agents.update( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.agents.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -100,6 +214,100 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + agent = client.agents.delete( + "uuid", + ) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + 
@parametrize + def test_method_update_status(self, client: GradientAI) -> None: + agent = client.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_status_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_status(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_status(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_status(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.agents.with_raw_response.update_status( + path_uuid="", + ) + class TestAsyncAgents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -149,6 +357,113 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + agent = 
await async_client.agents.update( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.agents.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -186,3 +501,97 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.delete( + "uuid", + ) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> 
None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.agents.with_raw_response.update_status( + path_uuid="", + ) diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/test_api_keys.py new file mode 100644 index 00000000..fa1895c9 --- /dev/null +++ b/tests/api_resources/test_api_keys.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types import APIKeyListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAPIKeys:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        api_key = client.api_keys.list()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        api_key = client.api_keys.list(
+            page=0,
+            per_page=0,
+            public_only=True,
+            usecases=["MODEL_USECASE_UNKNOWN"],
+        )
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.api_keys.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = response.parse()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.api_keys.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = response.parse()
+            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncAPIKeys:
+    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        api_key = await async_client.api_keys.list()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        api_key = await async_client.api_keys.list(
+            page=0,
+            per_page=0,
+            public_only=True,
+            usecases=["MODEL_USECASE_UNKNOWN"],
+        )
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.api_keys.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        api_key = await response.parse()
+        assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.api_keys.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            api_key = await response.parse()
+            assert_matches_type(APIKeyListResponse, api_key, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index bf761cf2..e204f9fe 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++
b/tests/api_resources/test_knowledge_bases.py @@ -12,6 +12,9 @@ from gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, + KnowledgeBaseDeleteResponse, + KnowledgeBaseUpdateResponse, + KnowledgeBaseRetrieveResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -84,6 +87,104 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -121,6 +222,48 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.knowledge_bases.with_raw_response.delete( + "", + ) + class TestAsyncKnowledgeBases: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -189,6 +332,104 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -225,3 +466,45 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py new file mode 100644 index 00000000..64c84612 --- /dev/null +++ b/tests/api_resources/test_regions.py @@ -0,0 +1,96 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import RegionListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRegions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + region = client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + region = client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.regions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncRegions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + region = await async_client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + region = await async_client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = await response.parse() + 
        assert_matches_type(RegionListResponse, region, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.regions.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            region = await response.parse()
+            assert_matches_type(RegionListResponse, region, path=["response"])
+
+        assert cast(Any, response.is_closed) is True

From 18a6d20c4e1e514446fd13b4eadd35868f37ab45 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Jun 2025 05:43:32 +0000
Subject: [PATCH 031/200] chore: update SDK settings

---
 .github/workflows/create-releases.yml         | 38 -------------------
 .github/workflows/publish-pypi.yml            |  8 +++-
 .github/workflows/release-doctor.yml          |  3 +-
 .stats.yml                                    |  2 +-
 CONTRIBUTING.md                               |  4 +-
 README.md                                     |  6 +--
 bin/check-release-environment                 |  4 --
 pyproject.toml                                |  6 +--
 src/gradientai/resources/agents/agents.py     |  8 ++--
 src/gradientai/resources/agents/api_keys.py   |  8 ++--
 .../resources/agents/child_agents.py          |  8 ++--
 src/gradientai/resources/agents/functions.py  |  8 ++--
 .../resources/agents/knowledge_bases.py       |  8 ++--
 src/gradientai/resources/agents/versions.py   |  8 ++--
 src/gradientai/resources/api_keys/api_keys.py |  8 ++--
 .../resources/api_keys/api_keys_.py           |  8 ++--
 .../resources/auth/agents/agents.py           |  8 ++--
 src/gradientai/resources/auth/agents/token.py |  8 ++--
 src/gradientai/resources/auth/auth.py         |  8 ++--
 src/gradientai/resources/chat.py              |  8 ++--
 src/gradientai/resources/embeddings.py        |  8 ++--
 src/gradientai/resources/indexing_jobs.py     |  8 ++--
 .../resources/knowledge_bases/data_sources.py |  8 ++--
 .../knowledge_bases/knowledge_bases.py        |  8 ++--
 src/gradientai/resources/models.py            |  8 ++--
 .../providers/anthropic/anthropic.py          |  8 ++--
 .../resources/providers/anthropic/keys.py     |  8 ++--
 .../resources/providers/openai/keys.py        |  8 ++--
 .../resources/providers/openai/openai.py      |  8 ++--
 .../resources/providers/providers.py          |  8 ++--
 src/gradientai/resources/regions.py           |  8 ++--
 31 files changed, 108 insertions(+), 147 deletions(-)
 delete mode 100644 .github/workflows/create-releases.yml

diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml
deleted file mode 100644
index 04dac49f..00000000
--- a/.github/workflows/create-releases.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Create releases
-on:
-  schedule:
-    - cron: '0 5 * * *' # every day at 5am UTC
-  push:
-    branches:
-      - main
-
-jobs:
-  release:
-    name: release
-    if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python'
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: stainless-api/trigger-release-please@v1
-        id: release
-        with:
-          repo: ${{ github.event.repository.full_name }}
-          stainless-api-key: ${{ secrets.STAINLESS_API_KEY }}
-
-      - name: Install Rye
-        if: ${{ steps.release.outputs.releases_created }}
-        run: |
-          curl -sSf https://rye.astral.sh/get | bash
-          echo "$HOME/.rye/shims" >> $GITHUB_PATH
-        env:
-          RYE_VERSION: '0.44.0'
-          RYE_INSTALL_OPTION: '--yes'
-
-      - name: Publish to PyPI
-        if: ${{ steps.release.outputs.releases_created }}
-        run: |
-          bash ./bin/publish-pypi
-        env:
-          PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index bff3a970..34110cd4 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -1,9 +1,13 @@
-# workflow for re-running publishing to PyPI in case it fails for some reason
-# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml
 name: Publish PyPI
 on:
   workflow_dispatch:
 
+  release:
+    types: [published]
+
 jobs:
   publish:
     name: publish
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 94e02117..9845ae8d 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -9,7 +9,7 @@ jobs:
   release_doctor:
     name: release doctor
     runs-on: ubuntu-latest
-    if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+    if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
 
     steps:
       - uses: actions/checkout@v4
@@ -18,5 +18,4 @@ jobs:
       run: |
         bash ./bin/check-release-environment
       env:
-        STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }}
         PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.stats.yml b/.stats.yml
index 74cbd5c9..54f59bb8 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 60
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml
 openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0
-config_hash: 69dc66269416b2e01e8852b5a6788b97
+config_hash: 53eac5170a4d8967367b33767544a858
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 086907ef..fe7e0d7c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g
 To install via git:
 
 ```sh
-$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git
+$ pip install git+ssh://git@github.com/digitalocean/genai-python.git
 ```
 
 Alternatively, you can build from source and install the wheel file:
@@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
 
 ### Publish with a GitHub workflow
 
-You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
### Publish manually diff --git a/README.md b/README.md index 10236f18..bf235be1 100644 --- a/README.md +++ b/README.md @@ -235,9 +235,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -343,7 +343,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment index 78967e8b..b1bd8969 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/pyproject.toml b/pyproject.toml index 9c6fdd19..8f36a952 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/gradientai-python" -Repository = "https://github.com/digitalocean/gradientai-python" +Homepage = "https://github.com/digitalocean/genai-python" +Repository = "https://github.com/digitalocean/genai-python" [tool.rye] @@ -122,7 +122,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 87e2aeca..036abf75 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -104,7 +104,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -460,7 +460,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -469,7 +469,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 7180503f..4470850c 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -268,7 +268,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -277,7 +277,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py index 1f7fe3ce..163e52cf 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChildAgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChildAgentsResourceWithStreamingResponse(self) @@ -237,7 +237,7 @@ def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChildAgentsResourceWithRawResponse(self) @@ -246,7 +246,7 @@ def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChildAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 6de9b141..19c63d8c 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -199,7 +199,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -208,7 +208,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 97b086e0..a400c56a 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -160,7 +160,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -169,7 +169,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index d71da8df..e77a252b 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py index aecccfc3..355cea17 100644 --- a/src/gradientai/resources/api_keys/api_keys.py +++ b/src/gradientai/resources/api_keys/api_keys.py @@ -36,7 +36,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -45,7 +45,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -135,7 +135,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -144,7 +144,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py index 969bcfb9..03d70150 100644 --- a/src/gradientai/resources/api_keys/api_keys_.py +++ b/src/gradientai/resources/api_keys/api_keys_.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -242,7 +242,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -251,7 +251,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py index 52426560..a0aa9faf 100644 --- a/src/gradientai/resources/auth/agents/agents.py +++ b/src/gradientai/resources/auth/agents/agents.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py index 26de7c06..f39c892d 100644 --- a/src/gradientai/resources/auth/agents/token.py +++ b/src/gradientai/resources/auth/agents/token.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> TokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return TokenResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> TokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return TokenResourceWithStreamingResponse(self) @@ -85,7 +85,7 @@ def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncTokenResourceWithRawResponse(self) @@ -94,7 +94,7 @@ def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncTokenResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/auth.py b/src/gradientai/resources/auth/auth.py index 7a502a2c..985fc56c 100644 --- a/src/gradientai/resources/auth/auth.py +++ b/src/gradientai/resources/auth/auth.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AuthResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AuthResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAuthResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAuthResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat.py b/src/gradientai/resources/chat.py index 223e7cf3..518fbad8 100644 --- a/src/gradientai/resources/chat.py +++ b/src/gradientai/resources/chat.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -191,7 +191,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -200,7 +200,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/embeddings.py b/src/gradientai/resources/embeddings.py index 36ffe3c6..1bcd3145 100644 --- a/src/gradientai/resources/embeddings.py +++ b/src/gradientai/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py index 6647d36c..d0b933e8 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -34,7 +34,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -43,7 +43,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -250,7 +250,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -259,7 +259,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index a1d2c575..68714895 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -196,7 +196,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -205,7 +205,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 7d4f38e3..f73ab08c 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -312,7 +312,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -321,7 +321,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index c30e1135..81b75441 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/gradientai/resources/providers/anthropic/anthropic.py index 23a914e9..64783563 100644 --- a/src/gradientai/resources/providers/anthropic/anthropic.py +++ b/src/gradientai/resources/providers/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py index 8fbb64db..1b11fc99 100644 --- a/src/gradientai/resources/providers/anthropic/keys.py +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -303,7 +303,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -312,7 +312,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index 44ac8508..abcb22f0 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -301,7 +301,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py index b02dc2e1..d29fd062 100644 --- a/src/gradientai/resources/providers/openai/openai.py +++ b/src/gradientai/resources/providers/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py index ef942f73..50e3db1a 100644 --- a/src/gradientai/resources/providers/providers.py +++ b/src/gradientai/resources/providers/providers.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index ad760c24..bbf07c3e 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RegionsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RegionsResourceWithStreamingResponse(self) @@ -95,7 +95,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRegionsResourceWithRawResponse(self) @@ -104,7 +104,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRegionsResourceWithStreamingResponse(self) From f6a6e5d99cb924a70ee25f9bd8fbfadca4c83dcb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 02:46:26 +0000 Subject: [PATCH 032/200] chore(tests): add tests for httpx client instantiation & proxies --- tests/test_client.py | 46 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/test_client.py b/tests/test_client.py index 59eee2ff..4cf52324 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -30,6 +30,8 @@ DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + DefaultHttpxClient, + DefaultAsyncHttpxClient, make_request_options, ) @@ -826,6 +828,28 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects @@ -1679,6 +1703,28 @@ async def test_main() -> None: time.sleep(0.1) + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) async def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects From be3eafe76febf599f7fc4af4e96ca3df734f8483 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 04:16:01 +0000 Subject: [PATCH 033/200] chore(internal): update conftest.py --- tests/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 04c66a33..8432d29e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,5 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + from __future__ import annotations import os From f15a07cf9c187b820432bbd410b7640f2339c557 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 06:46:58 +0000 Subject: [PATCH 034/200] chore(ci): enable for pull requests --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88980c93..08bd7a02 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,10 @@ on: - 'integrated/**' - 'stl-preview-head/**' - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: From e49ec6e7b86add2d2574f68aba44712ebb66034a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 12:12:44 +0000 Subject: [PATCH 035/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 10 +- src/gradientai/_client.py | 38 -- src/gradientai/resources/__init__.py | 14 - src/gradientai/resources/chat.py | 381 ------------------ src/gradientai/types/__init__.py | 6 - ...request_message_content_part_text_param.py | 15 - .../types/chat_completion_token_logprob.py | 57 --- .../types/chat_create_completion_params.py | 208 ---------- .../types/chat_create_completion_response.py | 81 ---- tests/api_resources/test_chat.py | 184 --------- 11 files changed, 3 insertions(+), 995 deletions(-) delete mode 100644 src/gradientai/resources/chat.py delete mode 100644 src/gradientai/types/chat_completion_request_message_content_part_text_param.py delete mode 100644 src/gradientai/types/chat_completion_token_logprob.py delete mode 100644 src/gradientai/types/chat_create_completion_params.py delete mode 100644 src/gradientai/types/chat_create_completion_response.py delete mode 100644 tests/api_resources/test_chat.py diff --git a/.stats.yml b/.stats.yml index 54f59bb8..297debd9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 60 +configured_endpoints: 59 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 53eac5170a4d8967367b33767544a858 +config_hash: 3e04a2c7a4b0b9b16bd2956a3208b942 diff --git a/api.md b/api.md index a3d3e8c1..6a543de5 100644 --- a/api.md +++ b/api.md @@ -306,17 +306,9 @@ Methods: Types: ```python -from gradientai.types import ( - ChatCompletionRequestMessageContentPartText, - ChatCompletionTokenLogprob, - ChatCreateCompletionResponse, -) +from gradientai.types import ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob ``` -Methods: - -- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse - # Embeddings Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 5c0172c1..9bf55fd7 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -33,7 +33,6 @@ if TYPE_CHECKING: from .resources import ( auth, - chat, agents, models, regions, @@ -43,7 +42,6 @@ indexing_jobs, knowledge_bases, ) - from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.auth.auth import AuthResource, AsyncAuthResource @@ -163,12 +161,6 @@ def 
api_keys(self) -> APIKeysResource: return APIKeysResource(self) - @cached_property - def chat(self) -> ChatResource: - from .resources.chat import ChatResource - - return ChatResource(self) - @cached_property def embeddings(self) -> EmbeddingsResource: from .resources.embeddings import EmbeddingsResource @@ -391,12 +383,6 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) - @cached_property - def chat(self) -> AsyncChatResource: - from .resources.chat import AsyncChatResource - - return AsyncChatResource(self) - @cached_property def embeddings(self) -> AsyncEmbeddingsResource: from .resources.embeddings import AsyncEmbeddingsResource @@ -570,12 +556,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.ChatResourceWithRawResponse: - from .resources.chat import ChatResourceWithRawResponse - - return ChatResourceWithRawResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: from .resources.embeddings import EmbeddingsResourceWithRawResponse @@ -637,12 +617,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.AsyncChatResourceWithRawResponse: - from .resources.chat import AsyncChatResourceWithRawResponse - - return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse @@ -704,12 +678,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.ChatResourceWithStreamingResponse: - from .resources.chat import ChatResourceWithStreamingResponse - - return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: from .resources.embeddings import EmbeddingsResourceWithStreamingResponse @@ -771,12 +739,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: - from .resources.chat import AsyncChatResourceWithStreamingResponse - - return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: from .resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 6dcbff02..05417215 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,14 +8,6 @@ AuthResourceWithStreamingResponse, AsyncAuthResourceWithStreamingResponse, ) -from .chat import ( - ChatResource, - AsyncChatResource, - ChatResourceWithRawResponse, - AsyncChatResourceWithRawResponse, - ChatResourceWithStreamingResponse, - AsyncChatResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -124,12 +116,6 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - 
"ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", "EmbeddingsResource", "AsyncEmbeddingsResource", "EmbeddingsResourceWithRawResponse", diff --git a/src/gradientai/resources/chat.py b/src/gradientai/resources/chat.py deleted file mode 100644 index 518fbad8..00000000 --- a/src/gradientai/resources/chat.py +++ /dev/null @@ -1,381 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional - -import httpx - -from ..types import chat_create_completion_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.chat_create_completion_response import ChatCreateCompletionResponse - -__all__ = ["ChatResource", "AsyncChatResource"] - - -class ChatResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ChatResourceWithStreamingResponse(self) - - def create_completion( - self, - *, - messages: Iterable[chat_create_completion_params.Message], - model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChatCreateCompletionResponse: - """ - Creates a model response for the given chat conversation. 
- - Args: - messages: A list of messages comprising the conversation so far. - - model: Model ID used to generate the response. - - frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. - - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - - logprobs: Whether to return log probabilities of the output tokens or not. If true, - returns the log probabilities of each output token returned in the `content` of - `message`. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - - max_tokens: The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - n: How many chat completion choices to generate for each input message. Note that - you will be charged based on the number of generated tokens across all of the - choices. Keep `n` as `1` to minimize costs. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - stop: Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. We generally recommend altering this or `top_p` but - not both. - - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/chat/completions", - body=maybe_transform( - { - "messages": messages, - "model": model, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "n": n, - "presence_penalty": presence_penalty, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "temperature": temperature, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - }, - chat_create_completion_params.ChatCreateCompletionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChatCreateCompletionResponse, - ) - - -class AsyncChatResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncChatResourceWithStreamingResponse(self) - - async def create_completion( - self, - *, - messages: Iterable[chat_create_completion_params.Message], - model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChatCreateCompletionResponse: - """ - Creates a model response for the given chat conversation. - - Args: - messages: A list of messages comprising the conversation so far. - - model: Model ID used to generate the response. - - frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. - - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - - logprobs: Whether to return log probabilities of the output tokens or not. If true, - returns the log probabilities of each output token returned in the `content` of - `message`. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - - max_tokens: The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - n: How many chat completion choices to generate for each input message. Note that - you will be charged based on the number of generated tokens across all of the - choices. Keep `n` as `1` to minimize costs. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - stop: Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. We generally recommend altering this or `top_p` but - not both. - - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/chat/completions", - body=await async_maybe_transform( - { - "messages": messages, - "model": model, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "n": n, - "presence_penalty": presence_penalty, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "temperature": temperature, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - }, - chat_create_completion_params.ChatCreateCompletionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChatCreateCompletionResponse, - ) - - -class ChatResourceWithRawResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - self.create_completion = to_raw_response_wrapper( - chat.create_completion, - ) - - -class AsyncChatResourceWithRawResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - self.create_completion = async_to_raw_response_wrapper( - chat.create_completion, - ) - - -class ChatResourceWithStreamingResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - self.create_completion = to_streamed_response_wrapper( - chat.create_completion, - ) - - -class AsyncChatResourceWithStreamingResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - self.create_completion = async_to_streamed_response_wrapper( - chat.create_completion, - ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index ee516f83..6992d67b 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -39,19 +39,13 @@ from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob -from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse -from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as 
IndexingJobRetrieveDataSourcesResponse, ) -from .chat_completion_request_message_content_part_text_param import ( - ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, -) diff --git a/src/gradientai/types/chat_completion_request_message_content_part_text_param.py b/src/gradientai/types/chat_completion_request_message_content_part_text_param.py deleted file mode 100644 index 4aec9488..00000000 --- a/src/gradientai/types/chat_completion_request_message_content_part_text_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ChatCompletionRequestMessageContentPartTextParam"] - - -class ChatCompletionRequestMessageContentPartTextParam(TypedDict, total=False): - text: Required[str] - """The text content.""" - - type: Required[Literal["text"]] - """The type of the content part.""" diff --git a/src/gradientai/types/chat_completion_token_logprob.py b/src/gradientai/types/chat_completion_token_logprob.py deleted file mode 100644 index 78de1dfa..00000000 --- a/src/gradientai/types/chat_completion_token_logprob.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] - - -class TopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChatCompletionTokenLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - top_logprobs: List[TopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. - """ diff --git a/src/gradientai/types/chat_create_completion_params.py b/src/gradientai/types/chat_create_completion_params.py deleted file mode 100644 index 05c427b1..00000000 --- a/src/gradientai/types/chat_create_completion_params.py +++ /dev/null @@ -1,208 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
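Before these logprob types disappear, note how their fields are typically consumed: each `logprob` is a natural-log value, and `-9999.0` is the documented sentinel for tokens outside the top 20 most likely. A minimal sketch of recovering a linear probability (the helper is illustrative, not part of this SDK):

    import math

    def token_probability(logprob: float) -> float:
        # exp() inverts the natural log; the -9999.0 sentinel marks a
        # "very unlikely" token, so treat it as zero probability.
        return 0.0 if logprob <= -9999.0 else math.exp(logprob)

    token_probability(-0.105)  # ~0.90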
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .chat_completion_request_message_content_part_text_param import ChatCompletionRequestMessageContentPartTextParam - -__all__ = [ - "ChatCreateCompletionParams", - "Message", - "MessageChatCompletionRequestSystemMessage", - "MessageChatCompletionRequestDeveloperMessage", - "MessageChatCompletionRequestUserMessage", - "MessageChatCompletionRequestAssistantMessage", - "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", - "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal", - "StreamOptions", -] - - -class ChatCreateCompletionParams(TypedDict, total=False): - messages: Required[Iterable[Message]] - """A list of messages comprising the conversation so far.""" - - model: Required[str] - """Model ID used to generate the response.""" - - frequency_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on their existing frequency in the - text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - - logit_bias: Optional[Dict[str, int]] - """Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - """ - - logprobs: Optional[bool] - """Whether to return log probabilities of the output tokens or not. - - If true, returns the log probabilities of each output token returned in the - `content` of `message`. - """ - - max_completion_tokens: Optional[int] - """ - The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - """ - - max_tokens: Optional[int] - """The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - n: Optional[int] - """How many chat completion choices to generate for each input message. - - Note that you will be charged based on the number of generated tokens across all - of the choices. Keep `n` as `1` to minimize costs. - """ - - presence_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on whether they appear in the text so - far, increasing the model's likelihood to talk about new topics. - """ - - stop: Union[Optional[str], List[str], None] - """Up to 4 sequences where the API will stop generating further tokens. - - The returned text will not contain the stop sequence. 
- """ - - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - """ - - stream_options: Optional[StreamOptions] - """Options for streaming response. Only set this when you set `stream: true`.""" - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. - - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. We generally recommend altering - this or `top_p` but not both. - """ - - top_logprobs: Optional[int] - """ - An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - """ - - user: str - """ - A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - """ - - -class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the system message.""" - - role: Required[Literal["system"]] - """The role of the messages author, in this case `system`.""" - - -class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the developer message.""" - - role: Required[Literal["developer"]] - """The role of the messages author, in this case `developer`.""" - - -class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the user message.""" - - role: Required[Literal["user"]] - """The role of the messages author, in this case `user`.""" - - -class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal( - TypedDict, total=False -): - refusal: Required[str] - """The refusal message generated by the model.""" - - type: Required[Literal["refusal"]] - """The type of the content part.""" - - -MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ - ChatCompletionRequestMessageContentPartTextParam, - MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal, -] - - -class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): - role: Required[Literal["assistant"]] - """The role of the messages author, in this case `assistant`.""" - - content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] - """The contents of the assistant message.""" - - refusal: Optional[str] - """The refusal message by the assistant.""" - - -Message: TypeAlias = Union[ - MessageChatCompletionRequestSystemMessage, - MessageChatCompletionRequestDeveloperMessage, - MessageChatCompletionRequestUserMessage, - MessageChatCompletionRequestAssistantMessage, -] - - -class StreamOptions(TypedDict, total=False): - 
include_usage: bool - """If set, an additional chunk will be streamed before the `data: [DONE]` message. - - The `usage` field on this chunk shows the token usage statistics for the entire - request, and the `choices` field will always be an empty array. - - All other chunks will also include a `usage` field, but with a null value. - **NOTE:** If the stream is interrupted, you may not receive the final usage - chunk which contains the total token usage for the request. - """ diff --git a/src/gradientai/types/chat_create_completion_response.py b/src/gradientai/types/chat_create_completion_response.py deleted file mode 100644 index e1f20038..00000000 --- a/src/gradientai/types/chat_create_completion_response.py +++ /dev/null @@ -1,81 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .chat_completion_token_logprob import ChatCompletionTokenLogprob - -__all__ = ["ChatCreateCompletionResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] - - -class ChoiceLogprobs(BaseModel): - content: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message content tokens with log probability information.""" - - refusal: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message refusal tokens with log probability information.""" - - -class ChoiceMessage(BaseModel): - content: Optional[str] = None - """The contents of the message.""" - - refusal: Optional[str] = None - """The refusal message generated by the model.""" - - role: Literal["assistant"] - """The role of the author of this message.""" - - -class Choice(BaseModel): - finish_reason: Literal["stop", "length"] - """The reason the model stopped generating tokens. - - This will be `stop` if the model hit a natural stop point or a provided stop - sequence, or `length` if the maximum number of tokens specified in the request - was reached. - """ - - index: int - """The index of the choice in the list of choices.""" - - logprobs: Optional[ChoiceLogprobs] = None - """Log probability information for the choice.""" - - message: ChoiceMessage - """A chat completion message generated by the model.""" - - -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - -class ChatCreateCompletionResponse(BaseModel): - id: str - """A unique identifier for the chat completion.""" - - choices: List[Choice] - """A list of chat completion choices. - - Can be more than one if `n` is greater than 1. - """ - - created: int - """The Unix timestamp (in seconds) of when the chat completion was created.""" - - model: str - """The model used for the chat completion.""" - - object: Literal["chat.completion"] - """The object type, which is always `chat.completion`.""" - - usage: Optional[Usage] = None - """Usage statistics for the completion request.""" diff --git a/tests/api_resources/test_chat.py b/tests/api_resources/test_chat.py deleted file mode 100644 index 2c5bcbd8..00000000 --- a/tests/api_resources/test_chat.py +++ /dev/null @@ -1,184 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
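The deleted tests that follow show the call shape this endpoint supported; for reference, a minimal sketch of consuming the response using the field names modeled above (`client` is assumed to be a configured GradientAI instance, and the model name mirrors the tests):

    completion = client.chat.create_completion(
        messages=[{"role": "user", "content": "Hello"}],
        model="llama3-8b-instruct",
    )
    print(completion.choices[0].message.content)  # assistant reply (may be None)
    if completion.usage is not None:  # usage is optional on the response
        print(completion.usage.total_tokens)  # prompt + completion tokens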
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ChatCreateCompletionResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestChat: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_completion(self, client: GradientAI) -> None: - chat = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_completion_with_all_params(self, client: GradientAI) -> None: - chat = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=True, - max_completion_tokens=256, - max_tokens=0, - metadata={"foo": "string"}, - n=1, - presence_penalty=-2, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - temperature=1, - top_logprobs=0, - top_p=1, - user="user-1234", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_completion(self, client: GradientAI) -> None: - response = client.chat.with_raw_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - chat = response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_completion(self, client: GradientAI) -> None: - with client.chat.with_streaming_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - chat = response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncChat: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_completion(self, async_client: AsyncGradientAI) -> None: - chat = await async_client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_completion_with_all_params(self, async_client: AsyncGradientAI) -> None: - chat = await async_client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=True, - max_completion_tokens=256, - max_tokens=0, - metadata={"foo": "string"}, - n=1, - presence_penalty=-2, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - temperature=1, - 
top_logprobs=0, - top_p=1, - user="user-1234", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_completion(self, async_client: AsyncGradientAI) -> None: - response = await async_client.chat.with_raw_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - chat = await response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_completion(self, async_client: AsyncGradientAI) -> None: - async with async_client.chat.with_streaming_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - chat = await response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - assert cast(Any, response.is_closed) is True From 8c74bd0fc85a66894d9e0e2b7f37a6f56e22fcee Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 13:26:35 +0000 Subject: [PATCH 036/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 2 +- src/gradientai/resources/models.py | 125 ++++++++++++++++++-- src/gradientai/types/__init__.py | 1 + src/gradientai/types/model_list_params.py | 42 +++++++ src/gradientai/types/model_list_response.py | 39 +++++- tests/api_resources/test_models.py | 22 ++++ 7 files changed, 218 insertions(+), 17 deletions(-) create mode 100644 src/gradientai/types/model_list_params.py diff --git a/.stats.yml b/.stats.yml index 297debd9..d42f6e6a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 59 +configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 3e04a2c7a4b0b9b16bd2956a3208b942 +config_hash: 84ba29fbded3618d3cc3994639c82547 diff --git a/api.md b/api.md index 6a543de5..840a2d5f 100644 --- a/api.md +++ b/api.md @@ -332,4 +332,4 @@ from gradientai.types import Model, ModelListResponse Methods: - client.models.retrieve(model) -> Model -- client.models.list() -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index 81b75441..b0df90ad 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -2,9 +2,14 @@ from __future__ import annotations +from typing import List +from typing_extensions import Literal + import httpx +from ..types import model_list_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( @@ -77,6 +82,21 @@ def retrieve( def list( self, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[
"MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -85,13 +105,50 @@ def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelListResponse: """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/models", + "/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -154,6 +211,21 @@ async def retrieve( async def list( self, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -162,13 +234,50 @@ async def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelListResponse: """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases.
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/models", + "/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 6992d67b..a389ecab 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -9,6 +9,7 @@ from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion +from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams diff --git a/src/gradientai/types/model_list_params.py b/src/gradientai/types/model_list_params.py new file mode 100644 index 00000000..4abc1dc1 --- /dev/null +++ b/src/gradientai/types/model_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +__all__ = ["ModelListParams"] + + +class ModelListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" + + public_only: bool + """only include models that are publicly available.""" + + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + """include only models defined for the listed usecases.
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + """ diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 8f835449..1d0e5eee 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,15 +1,42 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List -from typing_extensions import Literal +from typing import List, Optional +from datetime import datetime -from .model import Model from .._models import BaseModel +from .api_agreement import APIAgreement +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_model_version import APIModelVersion -__all__ = ["ModelListResponse"] +__all__ = ["ModelListResponse", "Model"] + + +class Model(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None class ModelListResponse(BaseModel): - data: List[Model] + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None - object: Literal["list"] + models: Optional[List[Model]] = None diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index b9559c8e..04133ed4 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -65,6 +65,17 @@ def test_method_list(self, client: GradientAI) -> None: model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: @@ -139,6 +150,17 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() + @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: From b9550f01a12dcdfec60f59d3c5665c6126d62184 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 02:18:31 +0000 Subject: [PATCH 037/200] chore(readme): update badges --- README.md | 2 +- 1 file changed, 1
insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bf235be1..465a7a0c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Gradient AI Python API library -[![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) +[![PyPI version]()](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, From 1cdb6cb708a8f14e1a0a8e9437f2bc59c66b984b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 05:55:01 +0000 Subject: [PATCH 038/200] fix(tests): fix: tests which call HTTP endpoints directly with the example parameters --- tests/test_client.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index 4cf52324..d83082e3 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -24,7 +24,6 @@ from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError from gradientai._types import Omit from gradientai._models import BaseModel, FinalRequestOptions -from gradientai._constants import RAW_RESPONSE_HEADER from gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError from gradientai._base_client import ( DEFAULT_TIMEOUT, @@ -721,30 +720,21 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1548,30 +1538,25 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_timeout_errors_doesnt_leak(self, 
respx_mock: MockRouter) -> None: + async def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncGradientAI + ) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_status_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncGradientAI + ) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) From 3e2c0482ecc26116a7715bec545d1df70240d391 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 11:32:43 +0000 Subject: [PATCH 039/200] feat(api): update via SDK Studio --- .stats.yml | 8 +- api.md | 30 +-- src/gradientai/_client.py | 96 +-------- src/gradientai/resources/__init__.py | 28 --- src/gradientai/resources/agents/agents.py | 48 +++-- src/gradientai/resources/agents/api_keys.py | 40 +++- .../resources/agents/child_agents.py | 32 ++- src/gradientai/resources/agents/functions.py | 24 ++- .../resources/agents/knowledge_bases.py | 24 ++- src/gradientai/resources/agents/versions.py | 16 +- src/gradientai/resources/api_keys/api_keys.py | 8 +- .../resources/api_keys/api_keys_.py | 40 +++- src/gradientai/resources/auth/__init__.py | 33 --- .../resources/auth/agents/__init__.py | 33 --- .../resources/auth/agents/agents.py | 102 --------- src/gradientai/resources/auth/agents/token.py | 173 --------------- src/gradientai/resources/auth/auth.py | 102 --------- src/gradientai/resources/embeddings.py | 201 ------------------ src/gradientai/resources/indexing_jobs.py | 40 +++- .../resources/knowledge_bases/data_sources.py | 24 ++- .../knowledge_bases/knowledge_bases.py | 40 +++- src/gradientai/resources/models.py | 16 +- .../resources/providers/anthropic/keys.py | 48 +++-- .../resources/providers/openai/keys.py | 48 +++-- src/gradientai/resources/regions.py | 8 +- src/gradientai/types/__init__.py | 2 - src/gradientai/types/api_agent.py | 2 + src/gradientai/types/auth/agents/__init__.py | 3 - .../types/auth/agents/token_create_params.py | 13 -- .../auth/agents/token_create_response.py | 13 -- .../types/embedding_create_params.py | 28 --- .../types/embedding_create_response.py | 41 ---- .../types/knowledge_base_create_params.py | 16 +- .../api_knowledge_base_data_source.py | 12 +- tests/api_resources/auth/__init__.py | 1 - tests/api_resources/auth/agents/__init__.py | 1 - tests/api_resources/auth/agents/test_token.py | 124 ----------- tests/api_resources/test_embeddings.py | 116 ---------- 
tests/api_resources/test_knowledge_bases.py | 14 ++ 39 files changed, 398 insertions(+), 1250 deletions(-) delete mode 100644 src/gradientai/resources/auth/__init__.py delete mode 100644 src/gradientai/resources/auth/agents/__init__.py delete mode 100644 src/gradientai/resources/auth/agents/agents.py delete mode 100644 src/gradientai/resources/auth/agents/token.py delete mode 100644 src/gradientai/resources/auth/auth.py delete mode 100644 src/gradientai/resources/embeddings.py delete mode 100644 src/gradientai/types/auth/agents/token_create_params.py delete mode 100644 src/gradientai/types/auth/agents/token_create_response.py delete mode 100644 src/gradientai/types/embedding_create_params.py delete mode 100644 src/gradientai/types/embedding_create_response.py delete mode 100644 tests/api_resources/auth/__init__.py delete mode 100644 tests/api_resources/auth/agents/__init__.py delete mode 100644 tests/api_resources/auth/agents/test_token.py delete mode 100644 tests/api_resources/test_embeddings.py diff --git a/.stats.yml b/.stats.yml index d42f6e6a..9eb9eab4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 58 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml -openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 84ba29fbded3618d3cc3994639c82547 +configured_endpoints: 56 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml +openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 +config_hash: cfd2d18e8dfe7223b15ce9b204cef29e diff --git a/api.md b/api.md index 840a2d5f..278862d0 100644 --- a/api.md +++ b/api.md @@ -170,22 +170,6 @@ Methods: - client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse - client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse -# Auth - -## Agents - -### Token - -Types: - -```python -from gradientai.types.auth.agents import TokenCreateResponse -``` - -Methods: - -- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse - # Regions Types: @@ -306,21 +290,9 @@ Methods: Types: ```python -from gradientai.types import ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob -``` - -# Embeddings - -Types: - -```python -from gradientai.types import EmbeddingCreateResponse +from gradientai.types import ChatCompletionTokenLogprob ``` -Methods: - -- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse - # Models Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 9bf55fd7..e050112e 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,21 +31,9 @@ ) if TYPE_CHECKING: - from .resources import ( - auth, - agents, - models, - regions, - api_keys, - providers, - embeddings, - indexing_jobs, - knowledge_bases, - ) + from .resources import agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.auth.auth import AuthResource, AsyncAuthResource - from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, 
AsyncIndexingJobsResource from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource @@ -105,6 +93,7 @@ def __init__( if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") + self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -131,12 +120,6 @@ def providers(self) -> ProvidersResource: return ProvidersResource(self) - @cached_property - def auth(self) -> AuthResource: - from .resources.auth import AuthResource - - return AuthResource(self) - @cached_property def regions(self) -> RegionsResource: from .resources.regions import RegionsResource @@ -161,12 +144,6 @@ def api_keys(self) -> APIKeysResource: return APIKeysResource(self) - @cached_property - def embeddings(self) -> EmbeddingsResource: - from .resources.embeddings import EmbeddingsResource - - return EmbeddingsResource(self) - @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -237,7 +214,7 @@ def copy( params = set_default_query http_client = http_client or self._client - return self.__class__( + client = self.__class__( api_key=api_key or self.api_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -247,6 +224,8 @@ def copy( default_query=params, **_extra_kwargs, ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) @@ -327,6 +306,7 @@ def __init__( if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") + self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -353,12 +333,6 @@ def providers(self) -> AsyncProvidersResource: return AsyncProvidersResource(self) - @cached_property - def auth(self) -> AsyncAuthResource: - from .resources.auth import AsyncAuthResource - - return AsyncAuthResource(self) - @cached_property def regions(self) -> AsyncRegionsResource: from .resources.regions import AsyncRegionsResource @@ -383,12 +357,6 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) - @cached_property - def embeddings(self) -> AsyncEmbeddingsResource: - from .resources.embeddings import AsyncEmbeddingsResource - - return AsyncEmbeddingsResource(self) - @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -459,7 +427,7 @@ def copy( params = set_default_query http_client = http_client or self._client - return self.__class__( + client = self.__class__( api_key=api_key or self.api_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -469,6 +437,8 @@ def copy( default_query=params, **_extra_kwargs, ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) 
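The `_base_url_overridden` flag added above propagates through `copy()`, letting request construction distinguish a user-supplied base URL from the default. A minimal sketch of the intended behavior, assuming `GRADIENT_AI_BASE_URL` is unset and using an illustrative URL:

    client = GradientAI(api_key="example-key")
    assert client._base_url_overridden is False  # default DigitalOcean endpoint

    custom = client.copy(base_url="https://proxy.example.test")
    assert custom._base_url_overridden is True  # explicit override recorded
    assert custom.copy()._base_url_overridden is True  # flag survives further copies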
@@ -526,12 +496,6 @@ def providers(self) -> providers.ProvidersResourceWithRawResponse: return ProvidersResourceWithRawResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AuthResourceWithRawResponse: - from .resources.auth import AuthResourceWithRawResponse - - return AuthResourceWithRawResponse(self._client.auth) - @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: from .resources.regions import RegionsResourceWithRawResponse @@ -556,12 +520,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: - from .resources.embeddings import EmbeddingsResourceWithRawResponse - - return EmbeddingsResourceWithRawResponse(self._client.embeddings) - @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -587,12 +545,6 @@ def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: return AsyncProvidersResourceWithRawResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithRawResponse: - from .resources.auth import AsyncAuthResourceWithRawResponse - - return AsyncAuthResourceWithRawResponse(self._client.auth) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: from .resources.regions import AsyncRegionsResourceWithRawResponse @@ -617,12 +569,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: - from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse - - return AsyncEmbeddingsResourceWithRawResponse(self._client.embeddings) - @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -648,12 +594,6 @@ def providers(self) -> providers.ProvidersResourceWithStreamingResponse: return ProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AuthResourceWithStreamingResponse: - from .resources.auth import AuthResourceWithStreamingResponse - - return AuthResourceWithStreamingResponse(self._client.auth) - @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: from .resources.regions import RegionsResourceWithStreamingResponse @@ -678,12 +618,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: - from .resources.embeddings import EmbeddingsResourceWithStreamingResponse - - return EmbeddingsResourceWithStreamingResponse(self._client.embeddings) - @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -709,12 +643,6 @@ def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: return AsyncProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: - from .resources.auth import AsyncAuthResourceWithStreamingResponse - - return AsyncAuthResourceWithStreamingResponse(self._client.auth) - 
@cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: from .resources.regions import AsyncRegionsResourceWithStreamingResponse @@ -739,12 +667,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: - from .resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse - - return AsyncEmbeddingsResourceWithStreamingResponse(self._client.embeddings) - @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 05417215..82f79bc7 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -48,14 +40,6 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) -from .embeddings import ( - EmbeddingsResource, - AsyncEmbeddingsResource, - EmbeddingsResourceWithRawResponse, - AsyncEmbeddingsResourceWithRawResponse, - EmbeddingsResourceWithStreamingResponse, - AsyncEmbeddingsResourceWithStreamingResponse, -) from .indexing_jobs import ( IndexingJobsResource, AsyncIndexingJobsResource, @@ -86,12 +70,6 @@ "AsyncProvidersResourceWithRawResponse", "ProvidersResourceWithStreamingResponse", "AsyncProvidersResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", @@ -116,12 +94,6 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "EmbeddingsResource", - "AsyncEmbeddingsResource", - "EmbeddingsResourceWithRawResponse", - "AsyncEmbeddingsResourceWithRawResponse", - "EmbeddingsResourceWithStreamingResponse", - "AsyncEmbeddingsResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 036abf75..78439d33 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -159,7 +159,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/agents", + "/v2/gen-ai/agents" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/agents", body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, @@ -209,7 +211,9 @@ def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/agents/{uuid}", + f"/v2/gen-ai/agents/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}", options=make_request_options( 
diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py
index 036abf75..78439d33 100644
--- a/src/gradientai/resources/agents/agents.py
+++ b/src/gradientai/resources/agents/agents.py
@@ -159,7 +159,9 @@ def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._post(
-            "/v2/gen-ai/agents",
+            "/v2/gen-ai/agents"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/agents",
             body=maybe_transform(
                 {
                     "anthropic_key_uuid": anthropic_key_uuid,
@@ -209,7 +211,9 @@ def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._get(
-            f"/v2/gen-ai/agents/{uuid}",
+            f"/v2/gen-ai/agents/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -278,7 +282,9 @@ def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{path_uuid}",
+            f"/v2/gen-ai/agents/{path_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}",
             body=maybe_transform(
                 {
                     "anthropic_key_uuid": anthropic_key_uuid,
@@ -337,7 +343,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/agents",
+            "/v2/gen-ai/agents"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/agents",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -381,7 +389,9 @@ def delete(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/agents/{uuid}",
+            f"/v2/gen-ai/agents/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -418,7 +428,9 @@ def update_status(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
+            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
             body=maybe_transform(
                 {
                     "body_uuid": body_uuid,
@@ -515,7 +527,9 @@ async def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._post(
-            "/v2/gen-ai/agents",
+            "/v2/gen-ai/agents"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/agents",
             body=await async_maybe_transform(
                 {
                     "anthropic_key_uuid": anthropic_key_uuid,
@@ -565,7 +579,9 @@ async def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/agents/{uuid}",
+            f"/v2/gen-ai/agents/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -634,7 +650,9 @@ async def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{path_uuid}",
+            f"/v2/gen-ai/agents/{path_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}",
             body=await async_maybe_transform(
                 {
                     "anthropic_key_uuid": anthropic_key_uuid,
@@ -693,7 +711,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/agents",
+            "/v2/gen-ai/agents"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/agents",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -737,7 +757,9 @@ async def delete(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/agents/{uuid}",
+            f"/v2/gen-ai/agents/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -774,7 +796,9 @@ async def update_status(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
+            f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility",
             body=await async_maybe_transform(
                 {
                     "body_uuid": body_uuid,
diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py
index 4470850c..155e3adc 100644
--- a/src/gradientai/resources/agents/api_keys.py
+++ b/src/gradientai/resources/agents/api_keys.py
@@ -74,7 +74,9 @@ def create(
         if not path_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
         return self._post(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
             body=maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -121,7 +123,9 @@ def update(
         if not path_api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
             body=maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -169,7 +173,9 @@ def list(
         if not agent_uuid:
             raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
         return self._get(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -216,7 +222,9 @@ def delete(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -253,7 +261,9 @@ def regenerate(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -310,7 +320,9 @@ async def create(
         if not path_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
         return await self._post(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys",
             body=await async_maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -357,7 +369,9 @@ async def update(
         if not path_api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}",
             body=await async_maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -405,7 +419,9 @@ async def list(
         if not agent_uuid:
             raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -452,7 +468,9 @@ async def delete(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -489,7 +507,9 @@ async def regenerate(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
+            f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py
index 163e52cf..9031d8ce 100644
--- a/src/gradientai/resources/agents/child_agents.py
+++ b/src/gradientai/resources/agents/child_agents.py
@@ -85,7 +85,9 @@ def update(
                 f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
             )
         return self._put(
-            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
             body=maybe_transform(
                 {
                     "body_child_agent_uuid": body_child_agent_uuid,
@@ -132,7 +134,9 @@ def delete(
         if not child_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
+            f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -179,7 +183,9 @@ def add(
                 f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
             )
         return self._post(
-            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
             body=maybe_transform(
                 {
                     "body_child_agent_uuid": body_child_agent_uuid,
@@ -222,7 +228,9 @@ def view(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._get(
-            f"/v2/gen-ai/agents/{uuid}/child_agents",
+            f"/v2/gen-ai/agents/{uuid}/child_agents"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -291,7 +299,9 @@ async def update(
                 f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
             )
         return await self._put(
-            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
             body=await async_maybe_transform(
                 {
                     "body_child_agent_uuid": body_child_agent_uuid,
@@ -338,7 +348,9 @@ async def delete(
         if not child_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
+            f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -385,7 +397,9 @@ async def add(
                 f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}"
             )
         return await self._post(
-            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
+            f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}",
             body=await async_maybe_transform(
                 {
                     "body_child_agent_uuid": body_child_agent_uuid,
@@ -428,7 +442,9 @@ async def view(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/agents/{uuid}/child_agents",
+            f"/v2/gen-ai/agents/{uuid}/child_agents"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py
index 19c63d8c..67a811cc 100644
--- a/src/gradientai/resources/agents/functions.py
+++ b/src/gradientai/resources/agents/functions.py
@@ -77,7 +77,9 @@ def create(
         if not path_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
         return self._post(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/functions",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/functions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions",
             body=maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -134,7 +136,9 @@ def update(
         if not path_function_uuid:
             raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
             body=maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -184,7 +188,9 @@ def delete(
         if not function_uuid:
             raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -246,7 +252,9 @@ async def create(
         if not path_agent_uuid:
             raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
         return await self._post(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/functions",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/functions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions",
             body=await async_maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -303,7 +311,9 @@ async def update(
         if not path_function_uuid:
             raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
+            f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}",
             body=await async_maybe_transform(
                 {
                     "body_agent_uuid": body_agent_uuid,
@@ -353,7 +363,9 @@ async def delete(
         if not function_uuid:
             raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py
index a400c56a..3b9b0cd2 100644
--- a/src/gradientai/resources/agents/knowledge_bases.py
+++ b/src/gradientai/resources/agents/knowledge_bases.py
@@ -67,7 +67,9 @@ def attach(
         if not agent_uuid:
             raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
         return self._post(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -106,7 +108,9 @@ def attach_single(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return self._post(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -145,7 +149,9 @@ def detach(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -200,7 +206,9 @@ async def attach(
         if not agent_uuid:
             raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}")
         return await self._post(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -239,7 +247,9 @@ async def attach_single(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return await self._post(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -278,7 +288,9 @@ async def detach(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return await self._delete(
-            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
+            f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py
index e77a252b..86dbf99f 100644
--- a/src/gradientai/resources/agents/versions.py
+++ b/src/gradientai/resources/agents/versions.py
@@ -71,7 +71,9 @@ def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/agents/{path_uuid}/versions",
+            f"/v2/gen-ai/agents/{path_uuid}/versions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions",
             body=maybe_transform(
                 {
                     "body_uuid": body_uuid,
@@ -118,7 +120,9 @@ def list(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._get(
-            f"/v2/gen-ai/agents/{uuid}/versions",
+            f"/v2/gen-ai/agents/{uuid}/versions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -185,7 +189,9 @@ async def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/agents/{path_uuid}/versions",
+            f"/v2/gen-ai/agents/{path_uuid}/versions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions",
             body=await async_maybe_transform(
                 {
                     "body_uuid": body_uuid,
@@ -232,7 +238,9 @@ async def list(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/agents/{uuid}/versions",
+            f"/v2/gen-ai/agents/{uuid}/versions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py
index 355cea17..ee94a02d 100644
--- a/src/gradientai/resources/api_keys/api_keys.py
+++ b/src/gradientai/resources/api_keys/api_keys.py
@@ -104,7 +104,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/models",
+            "/v2/gen-ai/models"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -203,7 +205,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/models",
+            "/v2/gen-ai/models"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py
index 03d70150..7bea219b 100644
--- a/src/gradientai/resources/api_keys/api_keys_.py
+++ b/src/gradientai/resources/api_keys/api_keys_.py
@@ -69,7 +69,9 @@ def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._post(
-            "/v2/gen-ai/models/api_keys",
+            "/v2/gen-ai/models/api_keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
             body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -106,7 +108,9 @@ def update(
         if not path_api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
+            f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
             body=maybe_transform(
                 {
                     "body_api_key_uuid": body_api_key_uuid,
@@ -149,7 +153,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/models/api_keys",
+            "/v2/gen-ai/models/api_keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -193,7 +199,9 @@ def delete(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/models/api_keys/{api_key_uuid}",
+            f"/v2/gen-ai/models/api_keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -227,7 +235,9 @@ def update_regenerate(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
+            f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -279,7 +289,9 @@ async def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._post(
-            "/v2/gen-ai/models/api_keys",
+            "/v2/gen-ai/models/api_keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
             body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -316,7 +328,9 @@ async def update(
         if not path_api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
+            f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}",
             body=await async_maybe_transform(
                 {
                     "body_api_key_uuid": body_api_key_uuid,
@@ -359,7 +373,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/models/api_keys",
+            "/v2/gen-ai/models/api_keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models/api_keys",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -403,7 +419,9 @@ async def delete(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/models/api_keys/{api_key_uuid}",
+            f"/v2/gen-ai/models/api_keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -437,7 +455,9 @@ async def update_regenerate(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
+            f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/auth/__init__.py b/src/gradientai/resources/auth/__init__.py
deleted file mode 100644
index 7c844a98..00000000
--- a/src/gradientai/resources/auth/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .auth import (
-    AuthResource,
-    AsyncAuthResource,
-    AuthResourceWithRawResponse,
-    AsyncAuthResourceWithRawResponse,
-    AuthResourceWithStreamingResponse,
-    AsyncAuthResourceWithStreamingResponse,
-)
-from .agents import (
-    AgentsResource,
-    AsyncAgentsResource,
-    AgentsResourceWithRawResponse,
-    AsyncAgentsResourceWithRawResponse,
-    AgentsResourceWithStreamingResponse,
-    AsyncAgentsResourceWithStreamingResponse,
-)
-
-__all__ = [
-    "AgentsResource",
-    "AsyncAgentsResource",
-    "AgentsResourceWithRawResponse",
-    "AsyncAgentsResourceWithRawResponse",
-    "AgentsResourceWithStreamingResponse",
-    "AsyncAgentsResourceWithStreamingResponse",
-    "AuthResource",
-    "AsyncAuthResource",
-    "AuthResourceWithRawResponse",
-    "AsyncAuthResourceWithRawResponse",
-    "AuthResourceWithStreamingResponse",
-    "AsyncAuthResourceWithStreamingResponse",
-]
diff --git a/src/gradientai/resources/auth/agents/__init__.py b/src/gradientai/resources/auth/agents/__init__.py
deleted file mode 100644
index 2972198f..00000000
--- a/src/gradientai/resources/auth/agents/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .token import (
-    TokenResource,
-    AsyncTokenResource,
-    TokenResourceWithRawResponse,
-    AsyncTokenResourceWithRawResponse,
-    TokenResourceWithStreamingResponse,
-    AsyncTokenResourceWithStreamingResponse,
-)
-from .agents import (
-    AgentsResource,
-    AsyncAgentsResource,
-    AgentsResourceWithRawResponse,
-    AsyncAgentsResourceWithRawResponse,
-    AgentsResourceWithStreamingResponse,
-    AsyncAgentsResourceWithStreamingResponse,
-)
-
-__all__ = [
-    "TokenResource",
-    "AsyncTokenResource",
-    "TokenResourceWithRawResponse",
-    "AsyncTokenResourceWithRawResponse",
-    "TokenResourceWithStreamingResponse",
-    "AsyncTokenResourceWithStreamingResponse",
-    "AgentsResource",
-    "AsyncAgentsResource",
-    "AgentsResourceWithRawResponse",
-    "AsyncAgentsResourceWithRawResponse",
-    "AgentsResourceWithStreamingResponse",
-    "AsyncAgentsResourceWithStreamingResponse",
-]
diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py
deleted file mode 100644
index a0aa9faf..00000000
--- a/src/gradientai/resources/auth/agents/agents.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .token import (
-    TokenResource,
-    AsyncTokenResource,
-    TokenResourceWithRawResponse,
-    AsyncTokenResourceWithRawResponse,
-    TokenResourceWithStreamingResponse,
-    AsyncTokenResourceWithStreamingResponse,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["AgentsResource", "AsyncAgentsResource"]
-
-
-class AgentsResource(SyncAPIResource):
-    @cached_property
-    def token(self) -> TokenResource:
-        return TokenResource(self._client)
-
-    @cached_property
-    def with_raw_response(self) -> AgentsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AgentsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AgentsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AgentsResourceWithStreamingResponse(self)
-
-
-class AsyncAgentsResource(AsyncAPIResource):
-    @cached_property
-    def token(self) -> AsyncTokenResource:
-        return AsyncTokenResource(self._client)
-
-    @cached_property
-    def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncAgentsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncAgentsResourceWithStreamingResponse(self)
-
-
-class AgentsResourceWithRawResponse:
-    def __init__(self, agents: AgentsResource) -> None:
-        self._agents = agents
-
-    @cached_property
-    def token(self) -> TokenResourceWithRawResponse:
-        return TokenResourceWithRawResponse(self._agents.token)
-
-
-class AsyncAgentsResourceWithRawResponse:
-    def __init__(self, agents: AsyncAgentsResource) -> None:
-        self._agents = agents
-
-    @cached_property
-    def token(self) -> AsyncTokenResourceWithRawResponse:
-        return AsyncTokenResourceWithRawResponse(self._agents.token)
-
-
-class AgentsResourceWithStreamingResponse:
-    def __init__(self, agents: AgentsResource) -> None:
-        self._agents = agents
-
-    @cached_property
-    def token(self) -> TokenResourceWithStreamingResponse:
-        return TokenResourceWithStreamingResponse(self._agents.token)
-
-
-class AsyncAgentsResourceWithStreamingResponse:
-    def __init__(self, agents: AsyncAgentsResource) -> None:
-        self._agents = agents
-
-    @cached_property
-    def token(self) -> AsyncTokenResourceWithStreamingResponse:
-        return AsyncTokenResourceWithStreamingResponse(self._agents.token)
diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py
deleted file mode 100644
index f39c892d..00000000
--- a/src/gradientai/resources/auth/agents/token.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
-    to_raw_response_wrapper,
-    to_streamed_response_wrapper,
-    async_to_raw_response_wrapper,
-    async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.auth.agents import token_create_params
-from ....types.auth.agents.token_create_response import TokenCreateResponse
-
-__all__ = ["TokenResource", "AsyncTokenResource"]
-
-
-class TokenResource(SyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> TokenResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return TokenResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> TokenResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return TokenResourceWithStreamingResponse(self)
-
-    def create(
-        self,
-        path_agent_uuid: str,
-        *,
-        body_agent_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> TokenCreateResponse:
-        """
-        To issue an agent token, send a POST request to
-        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not path_agent_uuid:
-            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
-        return self._post(
-            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
-            body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=TokenCreateResponse,
-        )
-
-
-class AsyncTokenResource(AsyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> AsyncTokenResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncTokenResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncTokenResourceWithStreamingResponse(self)
-
-    async def create(
-        self,
-        path_agent_uuid: str,
-        *,
-        body_agent_uuid: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> TokenCreateResponse:
-        """
-        To issue an agent token, send a POST request to
-        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.
-
-        Args:
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        if not path_agent_uuid:
-            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
-        return await self._post(
-            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
-            body=await async_maybe_transform(
-                {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=TokenCreateResponse,
-        )
-
-
-class TokenResourceWithRawResponse:
-    def __init__(self, token: TokenResource) -> None:
-        self._token = token
-
-        self.create = to_raw_response_wrapper(
-            token.create,
-        )
-
-
-class AsyncTokenResourceWithRawResponse:
-    def __init__(self, token: AsyncTokenResource) -> None:
-        self._token = token
-
-        self.create = async_to_raw_response_wrapper(
-            token.create,
-        )
-
-
-class TokenResourceWithStreamingResponse:
-    def __init__(self, token: TokenResource) -> None:
-        self._token = token
-
-        self.create = to_streamed_response_wrapper(
-            token.create,
-        )
-
-
-class AsyncTokenResourceWithStreamingResponse:
-    def __init__(self, token: AsyncTokenResource) -> None:
-        self._token = token
-
-        self.create = async_to_streamed_response_wrapper(
-            token.create,
-        )
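The deleted resource above documented its endpoint directly: an agent token is issued with a POST to `/v2/gen-ai/auth/agents/{agent_uuid}/token`. Callers who relied on `client.auth.agents.token.create(...)` could reach the endpoint with plain `httpx` (already imported by the deleted module). A sketch under stated assumptions: the bearer-token header and the `agent_uuid` body key are guesses, since the patch only shows the pre-serialization `body_agent_uuid` name and no auth scheme:

```python
import httpx


def issue_agent_token(agent_uuid: str, do_api_token: str) -> dict:
    # Endpoint path taken from the deleted docstring; auth scheme is assumed.
    response = httpx.post(
        f"https://api.digitalocean.com/v2/gen-ai/auth/agents/{agent_uuid}/token",
        headers={"Authorization": f"Bearer {do_api_token}"},
        json={"agent_uuid": agent_uuid},  # assumed wire name for `body_agent_uuid`
    )
    response.raise_for_status()
    return response.json()
```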
diff --git a/src/gradientai/resources/auth/auth.py b/src/gradientai/resources/auth/auth.py
deleted file mode 100644
index 985fc56c..00000000
--- a/src/gradientai/resources/auth/auth.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .agents.agents import (
-    AgentsResource,
-    AsyncAgentsResource,
-    AgentsResourceWithRawResponse,
-    AsyncAgentsResourceWithRawResponse,
-    AgentsResourceWithStreamingResponse,
-    AsyncAgentsResourceWithStreamingResponse,
-)
-
-__all__ = ["AuthResource", "AsyncAuthResource"]
-
-
-class AuthResource(SyncAPIResource):
-    @cached_property
-    def agents(self) -> AgentsResource:
-        return AgentsResource(self._client)
-
-    @cached_property
-    def with_raw_response(self) -> AuthResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AuthResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AuthResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AuthResourceWithStreamingResponse(self)
-
-
-class AsyncAuthResource(AsyncAPIResource):
-    @cached_property
-    def agents(self) -> AsyncAgentsResource:
-        return AsyncAgentsResource(self._client)
-
-    @cached_property
-    def with_raw_response(self) -> AsyncAuthResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncAuthResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncAuthResourceWithStreamingResponse(self)
-
-
-class AuthResourceWithRawResponse:
-    def __init__(self, auth: AuthResource) -> None:
-        self._auth = auth
-
-    @cached_property
-    def agents(self) -> AgentsResourceWithRawResponse:
-        return AgentsResourceWithRawResponse(self._auth.agents)
-
-
-class AsyncAuthResourceWithRawResponse:
-    def __init__(self, auth: AsyncAuthResource) -> None:
-        self._auth = auth
-
-    @cached_property
-    def agents(self) -> AsyncAgentsResourceWithRawResponse:
-        return AsyncAgentsResourceWithRawResponse(self._auth.agents)
-
-
-class AuthResourceWithStreamingResponse:
-    def __init__(self, auth: AuthResource) -> None:
-        self._auth = auth
-
-    @cached_property
-    def agents(self) -> AgentsResourceWithStreamingResponse:
-        return AgentsResourceWithStreamingResponse(self._auth.agents)
-
-
-class AsyncAuthResourceWithStreamingResponse:
-    def __init__(self, auth: AsyncAuthResource) -> None:
-        self._auth = auth
-
-    @cached_property
-    def agents(self) -> AsyncAgentsResourceWithStreamingResponse:
-        return AsyncAgentsResourceWithStreamingResponse(self._auth.agents)
diff --git a/src/gradientai/resources/embeddings.py b/src/gradientai/resources/embeddings.py
deleted file mode 100644
index 1bcd3145..00000000
--- a/src/gradientai/resources/embeddings.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union
-
-import httpx
-
-from ..types import embedding_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
-    to_raw_response_wrapper,
-    to_streamed_response_wrapper,
-    async_to_raw_response_wrapper,
-    async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.embedding_create_response import EmbeddingCreateResponse
-
-__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"]
-
-
-class EmbeddingsResource(SyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> EmbeddingsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return EmbeddingsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return EmbeddingsResourceWithStreamingResponse(self)
-
-    def create(
-        self,
-        *,
-        input: Union[str, List[str]],
-        model: str,
-        user: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> EmbeddingCreateResponse:
-        """
-        Creates an embedding vector representing the input text.
-
-        Args:
-          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
-              inputs in a single request, pass an array of strings.
-
-          model: ID of the model to use. You can use the List models API to see all of your
-              available models.
-
-          user: A unique identifier representing your end-user, which can help DigitalOcean to
-              monitor and detect abuse.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return self._post(
-            "/embeddings",
-            body=maybe_transform(
-                {
-                    "input": input,
-                    "model": model,
-                    "user": user,
-                },
-                embedding_create_params.EmbeddingCreateParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=EmbeddingCreateResponse,
-        )
-
-
-class AsyncEmbeddingsResource(AsyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncEmbeddingsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
-        """
-        return AsyncEmbeddingsResourceWithStreamingResponse(self)
-
-    async def create(
-        self,
-        *,
-        input: Union[str, List[str]],
-        model: str,
-        user: str | NotGiven = NOT_GIVEN,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> EmbeddingCreateResponse:
-        """
-        Creates an embedding vector representing the input text.
-
-        Args:
-          input: Input text to embed, encoded as a string or array of tokens. To embed multiple
-              inputs in a single request, pass an array of strings.
-
-          model: ID of the model to use. You can use the List models API to see all of your
-              available models.
-
-          user: A unique identifier representing your end-user, which can help DigitalOcean to
-              monitor and detect abuse.
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return await self._post(
-            "/embeddings",
-            body=await async_maybe_transform(
-                {
-                    "input": input,
-                    "model": model,
-                    "user": user,
-                },
-                embedding_create_params.EmbeddingCreateParams,
-            ),
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=EmbeddingCreateResponse,
-        )
-
-
-class EmbeddingsResourceWithRawResponse:
-    def __init__(self, embeddings: EmbeddingsResource) -> None:
-        self._embeddings = embeddings
-
-        self.create = to_raw_response_wrapper(
-            embeddings.create,
-        )
-
-
-class AsyncEmbeddingsResourceWithRawResponse:
-    def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
-        self._embeddings = embeddings
-
-        self.create = async_to_raw_response_wrapper(
-            embeddings.create,
-        )
-
-
-class EmbeddingsResourceWithStreamingResponse:
-    def __init__(self, embeddings: EmbeddingsResource) -> None:
-        self._embeddings = embeddings
-
-        self.create = to_streamed_response_wrapper(
-            embeddings.create,
-        )
-
-
-class AsyncEmbeddingsResourceWithStreamingResponse:
-    def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
-        self._embeddings = embeddings
-
-        self.create = async_to_streamed_response_wrapper(
-            embeddings.create,
-        )
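With the embeddings resource removed, the request shape it used is still visible in the deleted code: a POST to `/embeddings` with `input`, `model`, and an optional `user` field, returning an `EmbeddingCreateResponse`. A migration sketch under stated assumptions; the host and the bearer-token header are not shown in this patch, and the deleted code resolved the relative path against the client's base URL:

```python
from typing import List, Union

import httpx


def create_embedding(base_url: str, token: str, text: Union[str, List[str]], model: str) -> dict:
    # Mirrors the payload the deleted EmbeddingsResource.create() sent.
    response = httpx.post(
        f"{base_url}/embeddings",
        headers={"Authorization": f"Bearer {token}"},  # assumed auth scheme
        json={"input": text, "model": model},
    )
    response.raise_for_status()
    return response.json()
```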
diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py
index d0b933e8..fcbcf43d 100644
--- a/src/gradientai/resources/indexing_jobs.py
+++ b/src/gradientai/resources/indexing_jobs.py
@@ -73,7 +73,9 @@ def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._post(
-            "/v2/gen-ai/indexing_jobs",
+            "/v2/gen-ai/indexing_jobs"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
             body=maybe_transform(
                 {
                     "data_source_uuids": data_source_uuids,
@@ -114,7 +116,9 @@ def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._get(
-            f"/v2/gen-ai/indexing_jobs/{uuid}",
+            f"/v2/gen-ai/indexing_jobs/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -151,7 +155,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/indexing_jobs",
+            "/v2/gen-ai/indexing_jobs"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -195,7 +201,9 @@ def retrieve_data_sources(
         if not indexing_job_uuid:
             raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
         return self._get(
-            f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
+            f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -232,7 +240,9 @@ def update_cancel(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
+            f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
             body=maybe_transform(
                 {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams
             ),
@@ -289,7 +299,9 @@ async def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._post(
-            "/v2/gen-ai/indexing_jobs",
+            "/v2/gen-ai/indexing_jobs"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
             body=await async_maybe_transform(
                 {
                     "data_source_uuids": data_source_uuids,
@@ -330,7 +342,9 @@ async def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/indexing_jobs/{uuid}",
+            f"/v2/gen-ai/indexing_jobs/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -367,7 +381,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/indexing_jobs",
+            "/v2/gen-ai/indexing_jobs"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -411,7 +427,9 @@ async def retrieve_data_sources(
         if not indexing_job_uuid:
             raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
+            f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -448,7 +466,9 @@ async def update_cancel(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
+            f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel",
             body=await async_maybe_transform(
                 {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams
             ),
diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py
index 68714895..b549b3dc 100644
--- a/src/gradientai/resources/knowledge_bases/data_sources.py
+++ b/src/gradientai/resources/knowledge_bases/data_sources.py
@@ -81,7 +81,9 @@ def create(
                 f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
             )
         return self._post(
-            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
             body=maybe_transform(
                 {
                     "aws_data_source": aws_data_source,
@@ -132,7 +134,9 @@ def list(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return self._get(
-            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -181,7 +185,9 @@ def delete(
         if not data_source_uuid:
             raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -242,7 +248,9 @@ async def create(
                 f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}"
             )
         return await self._post(
-            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
+            f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources",
             body=await async_maybe_transform(
                 {
                     "aws_data_source": aws_data_source,
@@ -293,7 +301,9 @@ async def list(
                 f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
             )
         return await self._get(
-            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -342,7 +352,9 @@ async def delete(
         if not data_source_uuid:
             raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
index f73ab08c..cf0cd8d8 100644
--- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py
+++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
@@ -109,7 +109,9 @@ def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._post(
-            "/v2/gen-ai/knowledge_bases",
+            "/v2/gen-ai/knowledge_bases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
             body=maybe_transform(
                 {
                     "database_id": database_id,
@@ -156,7 +158,9 @@ def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._get(
-            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            f"/v2/gen-ai/knowledge_bases/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -202,7 +206,9 @@ def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return self._put(
-            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
+            f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}",
             body=maybe_transform(
                 {
                     "database_id": database_id,
@@ -249,7 +255,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/knowledge_bases",
+            "/v2/gen-ai/knowledge_bases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -293,7 +301,9 @@ def delete(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return self._delete(
-            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            f"/v2/gen-ai/knowledge_bases/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -375,7 +385,9 @@ async def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._post(
-            "/v2/gen-ai/knowledge_bases",
+            "/v2/gen-ai/knowledge_bases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
             body=await async_maybe_transform(
                 {
                     "database_id": database_id,
@@ -422,7 +434,9 @@ async def retrieve(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._get(
-            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            f"/v2/gen-ai/knowledge_bases/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -468,7 +482,9 @@ async def update(
         if not path_uuid:
             raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
         return await self._put(
-            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
+            f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}",
             body=await async_maybe_transform(
                 {
                     "database_id": database_id,
@@ -515,7 +531,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/knowledge_bases",
+            "/v2/gen-ai/knowledge_bases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -559,7 +577,9 @@ async def delete(
         if not uuid:
             raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
         return await self._delete(
-            f"/v2/gen-ai/knowledge_bases/{uuid}",
+            f"/v2/gen-ai/knowledge_bases/{uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py
index b0df90ad..d8b6b385 100644
--- a/src/gradientai/resources/models.py
+++ b/src/gradientai/resources/models.py
@@ -72,7 +72,9 @@ def retrieve(
         if not model:
             raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return self._get(
-            f"/models/{model}",
+            f"/models/{model}"
+            if self._client._base_url_overridden
+            else f"https://inference.do-ai.run/v1/models/{model}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -134,7 +136,9 @@ def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._get(
-            "/v2/gen-ai/models",
+            "/v2/gen-ai/models"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -201,7 +205,9 @@ async def retrieve(
         if not model:
             raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return await self._get(
-            f"/models/{model}",
+            f"/models/{model}"
+            if self._client._base_url_overridden
+            else f"https://inference.do-ai.run/v1/models/{model}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -263,7 +269,9 @@ async def list(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return await self._get(
-            "/v2/gen-ai/models",
+            "/v2/gen-ai/models"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py
index 1b11fc99..9c1f6391 100644
--- a/src/gradientai/resources/providers/anthropic/keys.py
+++ b/src/gradientai/resources/providers/anthropic/keys.py
@@ -72,7 +72,9 @@ def create(
           timeout: Override the client-level default timeout for this request, in seconds
         """
         return self._post(
-            "/v2/gen-ai/anthropic/keys",
+            "/v2/gen-ai/anthropic/keys"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
             body=maybe_transform(
                 {
                     "api_key": api_key,
@@ -113,7 +115,9 @@ def retrieve(
         if not api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
         return self._get(
-            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -150,7 +154,9 @@ def update(
         if not path_api_key_uuid:
             raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
         return
self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", body=maybe_transform( { "api_key": api_key, @@ -195,7 +201,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -239,7 +247,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -278,7 +288,9 @@ def list_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -342,7 +354,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", body=await async_maybe_transform( { "api_key": api_key, @@ -383,7 +397,9 @@ async def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._get( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -420,7 +436,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", body=await async_maybe_transform( { "api_key": api_key, @@ -465,7 +483,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -509,7 +529,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._delete( - 
f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -548,7 +570,9 @@ async def list_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index abcb22f0..9bfaba8e 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -71,7 +71,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", body=maybe_transform( { "api_key": api_key, @@ -112,7 +114,9 @@ def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -149,7 +153,9 @@ def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", body=maybe_transform( { "api_key": api_key, @@ -193,7 +199,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -237,7 +245,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -276,7 +286,9 @@ def retrieve_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents", + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", 
options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -339,7 +351,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", body=await async_maybe_transform( { "api_key": api_key, @@ -380,7 +394,9 @@ async def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -417,7 +433,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", body=await async_maybe_transform( { "api_key": api_key, @@ -461,7 +479,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -505,7 +525,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -544,7 +566,9 @@ async def retrieve_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents", + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index bbf07c3e..43c2038b 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -70,7 +70,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/regions", + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -137,7 +139,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/regions", + "/v2/gen-ai/regions" + if 
self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index a389ecab..4b12d65c 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -26,10 +26,8 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index d6e18ca2..0a8df679 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -261,3 +261,5 @@ class APIAgent(BaseModel): user_id: Optional[str] = None uuid: Optional[str] = None + + workspace: Optional[object] = None diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py index 9fae55b6..f8ee8b14 100644 --- a/src/gradientai/types/auth/agents/__init__.py +++ b/src/gradientai/types/auth/agents/__init__.py @@ -1,6 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations - -from .token_create_params import TokenCreateParams as TokenCreateParams -from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/gradientai/types/auth/agents/token_create_params.py b/src/gradientai/types/auth/agents/token_create_params.py deleted file mode 100644 index 0df640f9..00000000 --- a/src/gradientai/types/auth/agents/token_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["TokenCreateParams"] - - -class TokenCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/gradientai/types/auth/agents/token_create_response.py b/src/gradientai/types/auth/agents/token_create_response.py deleted file mode 100644 index e58b7399..00000000 --- a/src/gradientai/types/auth/agents/token_create_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ...._models import BaseModel - -__all__ = ["TokenCreateResponse"] - - -class TokenCreateResponse(BaseModel): - access_token: Optional[str] = None - - refresh_token: Optional[str] = None diff --git a/src/gradientai/types/embedding_create_params.py b/src/gradientai/types/embedding_create_params.py deleted file mode 100644 index d3e923ad..00000000 --- a/src/gradientai/types/embedding_create_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union -from typing_extensions import Required, TypedDict - -__all__ = ["EmbeddingCreateParams"] - - -class EmbeddingCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str]]] - """Input text to embed, encoded as a string or array of tokens. - - To embed multiple inputs in a single request, pass an array of strings. - """ - - model: Required[str] - """ID of the model to use. - - You can use the List models API to see all of your available models. - """ - - user: str - """ - A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - """ diff --git a/src/gradientai/types/embedding_create_response.py b/src/gradientai/types/embedding_create_response.py deleted file mode 100644 index 19c474fd..00000000 --- a/src/gradientai/types/embedding_create_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["EmbeddingCreateResponse", "Data", "Usage"] - - -class Data(BaseModel): - embedding: List[float] - """The embedding vector, which is a list of floats.""" - - index: int - """The index of the embedding in the list of embeddings.""" - - object: Literal["embedding"] - """The object type, which is always "embedding".""" - - -class Usage(BaseModel): - prompt_tokens: int - """The number of tokens used by the prompt.""" - - total_tokens: int - """The total number of tokens used by the request.""" - - -class EmbeddingCreateResponse(BaseModel): - data: List[Data] - """The list of embeddings generated by the model.""" - - model: str - """The name of the model used to generate the embedding.""" - - object: Literal["list"] - """The object type, which is always "list".""" - - usage: Usage - """The usage information for the request.""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index 3a58166b..2552bcf6 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -9,7 +9,7 @@ from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceAwsDataSource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -49,7 +49,21 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): vpc_uuid: str +class DatasourceAwsDataSource(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str + + class Datasource(TypedDict, total=False): + aws_data_source: DatasourceAwsDataSource + bucket_name: str bucket_region: str 
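Every generated method in this series now picks its URL per request: when the caller has overridden `base_url`, the relative path is kept so the client joins it against that base; otherwise the path is pinned to the service's canonical host. A minimal standalone sketch of that dispatch, assuming a stub client — `resolve_url` and `_Client` are illustrative names here, not part of the SDK:

```python
from dataclasses import dataclass


@dataclass
class _Client:
    # True when the user passed an explicit base_url (proxy, mock server, ...)
    _base_url_overridden: bool


def resolve_url(client: _Client, path: str,
                default_host: str = "https://api.digitalocean.com") -> str:
    """Mirrors the inline conditional each generated method carries above."""
    if client._base_url_overridden:
        # Keep the relative path; the HTTP layer prepends the custom base_url.
        return path
    # Pin the request to the route's canonical host. Control-plane routes
    # default to api.digitalocean.com; inference routes such as
    # /chat/completions default to https://inference.do-ai.run/v1 instead.
    return f"{default_host}{path}"


assert resolve_url(_Client(False), "/v2/gen-ai/regions") == (
    "https://api.digitalocean.com/v2/gen-ai/regions"
)
assert resolve_url(_Client(True), "/v2/gen-ai/regions") == "/v2/gen-ai/regions"
```

This keeps a single client able to talk to both hosts while still honoring a user-supplied base URL for every endpoint.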
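The `knowledge_base_create_params` hunk above also adds an `aws_data_source` variant alongside the existing Spaces-style fields. A hedged sketch of a create call using it — the constructor picking up credentials from the environment and the concrete field values are assumptions; the field names come from the `DatasourceAwsDataSource` TypedDict in the diff, and the call shape mirrors the updated test later in this series:

```python
from gradientai import GradientAI

client = GradientAI()  # assumes credentials are read from the environment

knowledge_base = client.knowledge_bases.create(
    database_id="database_id",
    datasources=[
        {
            "aws_data_source": {
                "bucket_name": "my-bucket",  # placeholder values throughout
                "item_path": "docs/",
                "key_id": "AKIA...",
                "region": "us-east-1",
                "secret_key": "...",
            },
        }
    ],
)
```

Unless `base_url` is overridden, this request is routed to `https://api.digitalocean.com/v2/gen-ai/knowledge_bases`, per the hunk above.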
diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index df1cd3bb..57080aaa 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -9,10 +9,20 @@ from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource -__all__ = ["APIKnowledgeBaseDataSource"] +__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource"] + + +class AwsDataSource(BaseModel): + bucket_name: Optional[str] = None + + item_path: Optional[str] = None + + region: Optional[str] = None class APIKnowledgeBaseDataSource(BaseModel): + aws_data_source: Optional[AwsDataSource] = None + bucket_name: Optional[str] = None created_at: Optional[datetime] = None diff --git a/tests/api_resources/auth/__init__.py b/tests/api_resources/auth/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/auth/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/__init__.py b/tests/api_resources/auth/agents/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/auth/agents/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py deleted file mode 100644 index ef721cd0..00000000 --- a/tests/api_resources/auth/agents/test_token.py +++ /dev/null @@ -1,124 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.auth.agents import TokenCreateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestToken: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.auth.agents.token.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) - - -class TestAsyncToken: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.auth.agents.token.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py deleted file mode 100644 index e5b394ef..00000000 --- a/tests/api_resources/test_embeddings.py +++ /dev/null @@ -1,116 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import EmbeddingCreateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEmbeddings: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - embedding = client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - embedding = client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - user="user-1234", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.embeddings.with_raw_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.embeddings.with_streaming_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEmbeddings: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - embedding = await async_client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - 
assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - embedding = await async_client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - user="user-1234", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.embeddings.with_raw_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = await response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.embeddings.with_streaming_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = await response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index e204f9fe..c9171644 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -36,6 +36,13 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: database_id="database_id", datasources=[ { + "aws_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, "bucket_name": "bucket_name", "bucket_region": "bucket_region", "file_upload_data_source": { @@ -281,6 +288,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI database_id="database_id", datasources=[ { + "aws_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, "bucket_name": "bucket_name", "bucket_region": "bucket_region", "file_upload_data_source": { From 3e5e109a452f3dca6488302531949eb64f4cfd70 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 11:35:24 +0000 Subject: [PATCH 040/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 8 +- src/gradientai/_client.py | 39 +- src/gradientai/resources/__init__.py | 14 + src/gradientai/resources/chat/__init__.py | 33 ++ src/gradientai/resources/chat/chat.py | 102 +++++ src/gradientai/resources/chat/completions.py | 385 ++++++++++++++++++ src/gradientai/types/auth/agents/__init__.py | 3 - src/gradientai/types/chat/__init__.py | 6 + .../types/chat/completion_create_params.py | 185 +++++++++ .../types/chat/completion_create_response.py | 190 +++++++++ .../api_resources/chat}/__init__.py | 2 - tests/api_resources/chat/test_completions.py | 184 +++++++++ 13 files changed, 1146 insertions(+), 9 deletions(-) create mode 100644 src/gradientai/resources/chat/__init__.py create mode 100644 
src/gradientai/resources/chat/chat.py create mode 100644 src/gradientai/resources/chat/completions.py delete mode 100644 src/gradientai/types/auth/agents/__init__.py create mode 100644 src/gradientai/types/chat/__init__.py create mode 100644 src/gradientai/types/chat/completion_create_params.py create mode 100644 src/gradientai/types/chat/completion_create_response.py rename {src/gradientai/types/auth => tests/api_resources/chat}/__init__.py (70%) create mode 100644 tests/api_resources/chat/test_completions.py diff --git a/.stats.yml b/.stats.yml index 9eb9eab4..bd458f47 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 56 +configured_endpoints: 57 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: cfd2d18e8dfe7223b15ce9b204cef29e +config_hash: c880014064b4d19b42254d47f1bb2758 diff --git a/api.md b/api.md index 278862d0..ed9acf78 100644 --- a/api.md +++ b/api.md @@ -287,12 +287,18 @@ Methods: # Chat +## Completions + Types: ```python -from gradientai.types import ChatCompletionTokenLogprob +from gradientai.types.chat import CompletionCreateResponse ``` +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + # Models Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index e050112e..6927ff10 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,9 +31,10 @@ ) if TYPE_CHECKING: - from .resources import agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource @@ -144,6 +145,12 @@ def api_keys(self) -> APIKeysResource: return APIKeysResource(self) + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -357,6 +364,12 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -520,6 +533,12 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -569,6 +588,12 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) + 
@cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -618,6 +643,12 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -667,6 +698,12 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 82f79bc7..de26662c 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -94,6 +102,12 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/chat/__init__.py b/src/gradientai/resources/chat/__init__.py new file mode 100644 index 00000000..ec960eb4 --- /dev/null +++ b/src/gradientai/resources/chat/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = [ + "CompletionsResource", + "AsyncCompletionsResource", + "CompletionsResourceWithRawResponse", + "AsyncCompletionsResourceWithRawResponse", + "CompletionsResourceWithStreamingResponse", + "AsyncCompletionsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py new file mode 100644 index 00000000..ac19d849 --- /dev/null +++ b/src/gradientai/resources/chat/chat.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = ["ChatResource", "AsyncChatResource"] + + +class ChatResource(SyncAPIResource): + @cached_property + def completions(self) -> CompletionsResource: + return CompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return ChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return ChatResourceWithStreamingResponse(self) + + +class AsyncChatResource(AsyncAPIResource): + @cached_property + def completions(self) -> AsyncCompletionsResource: + return AsyncCompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncChatResourceWithStreamingResponse(self) + + +class ChatResourceWithRawResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithRawResponse: + return CompletionsResourceWithRawResponse(self._chat.completions) + + +class AsyncChatResourceWithRawResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithRawResponse: + return AsyncCompletionsResourceWithRawResponse(self._chat.completions) + + +class ChatResourceWithStreamingResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithStreamingResponse: + return CompletionsResourceWithStreamingResponse(self._chat.completions) + + +class AsyncChatResourceWithStreamingResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithStreamingResponse: + return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py new file mode 100644 index 00000000..62ab8f0d --- /dev/null +++ b/src/gradientai/resources/chat/completions.py @@ -0,0 +1,385 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.chat import completion_create_params +from ..._base_client import make_request_options +from ...types.chat.completion_create_response import CompletionCreateResponse + +__all__ = ["CompletionsResource", "AsyncCompletionsResource"] + + +class CompletionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return CompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return CompletionsResourceWithStreamingResponse(self) + + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class AsyncCompletionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncCompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncCompletionsResourceWithStreamingResponse(self) + + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class CompletionsResourceWithRawResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_raw_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithRawResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_raw_response_wrapper( + completions.create, + ) + + +class CompletionsResourceWithStreamingResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_streamed_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithStreamingResponse: + def __init__(self, completions: AsyncCompletionsResource) -> 
None: + self._completions = completions + + self.create = async_to_streamed_response_wrapper( + completions.create, + ) diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py deleted file mode 100644 index f8ee8b14..00000000 --- a/src/gradientai/types/auth/agents/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py new file mode 100644 index 00000000..9384ac14 --- /dev/null +++ b/src/gradientai/types/chat/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py new file mode 100644 index 00000000..11d032ff --- /dev/null +++ b/src/gradientai/types/chat/completion_create_params.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "CompletionCreateParams", + "Message", + "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestAssistantMessage", + "StreamOptions", +] + + +class CompletionCreateParams(TypedDict, total=False): + messages: Required[Iterable[Message]] + """A list of messages comprising the conversation so far.""" + + model: Required[str] + """Model ID used to generate the response.""" + + frequency_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on their existing frequency in the + text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + + logit_bias: Optional[Dict[str, int]] + """Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + """ + + logprobs: Optional[bool] + """Whether to return log probabilities of the output tokens or not. + + If true, returns the log probabilities of each output token returned in the + `content` of `message`. + """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + """ + + max_tokens: Optional[int] + """The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. 
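+
+    For example, a model with an 8,192-token context window that is sent a
+    7,000-token prompt can generate at most 1,192 completion tokens.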
+ """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + n: Optional[int] + """How many chat completion choices to generate for each input message. + + Note that you will be charged based on the number of generated tokens across all + of the choices. Keep `n` as `1` to minimize costs. + """ + + presence_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on whether they appear in the text so + far, increasing the model's likelihood to talk about new topics. + """ + + stop: Union[Optional[str], List[str], None] + """Up to 4 sequences where the API will stop generating further tokens. + + The returned text will not contain the stop sequence. + """ + + stream: Optional[bool] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + """ + + stream_options: Optional[StreamOptions] + """Options for streaming response. Only set this when you set `stream: true`.""" + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + top_logprobs: Optional[int] + """ + An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + user: str + """ + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. 
+ """ + + +class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the system message.""" + + role: Required[Literal["system"]] + """The role of the messages author, in this case `system`.""" + + +class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the developer message.""" + + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" + + +class MessageChatCompletionRequestUserMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the user message.""" + + role: Required[Literal["user"]] + """The role of the messages author, in this case `user`.""" + + +class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): + role: Required[Literal["assistant"]] + """The role of the messages author, in this case `assistant`.""" + + content: Union[str, List[str], None] + """The contents of the assistant message.""" + + +Message: TypeAlias = Union[ + MessageChatCompletionRequestSystemMessage, + MessageChatCompletionRequestDeveloperMessage, + MessageChatCompletionRequestUserMessage, + MessageChatCompletionRequestAssistantMessage, +] + + +class StreamOptions(TypedDict, total=False): + include_usage: bool + """If set, an additional chunk will be streamed before the `data: [DONE]` message. + + The `usage` field on this chunk shows the token usage statistics for the entire + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. + """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py new file mode 100644 index 00000000..5a25ac7c --- /dev/null +++ b/src/gradientai/types/chat/completion_create_response.py @@ -0,0 +1,190 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceLogprobsContent", + "ChoiceLogprobsContentTopLogprob", + "ChoiceLogprobsRefusal", + "ChoiceLogprobsRefusalTopLogprob", + "ChoiceMessage", + "Usage", +] + + +class ChoiceLogprobsContentTopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class ChoiceLogprobsContent(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. 
+ + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[ChoiceLogprobsContentTopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. + """ + + +class ChoiceLogprobsRefusalTopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class ChoiceLogprobsRefusal(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[ChoiceLogprobsRefusalTopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. + """ + + +class ChoiceLogprobs(BaseModel): + content: Optional[List[ChoiceLogprobsContent]] = None + """A list of message content tokens with log probability information.""" + + refusal: Optional[List[ChoiceLogprobsRefusal]] = None + """A list of message refusal tokens with log probability information.""" + + +class ChoiceMessage(BaseModel): + content: Optional[str] = None + """The contents of the message.""" + + refusal: Optional[str] = None + """The refusal message generated by the model.""" + + role: Literal["assistant"] + """The role of the author of this message.""" + + +class Choice(BaseModel): + finish_reason: Literal["stop", "length"] + """The reason the model stopped generating tokens. + + This will be `stop` if the model hit a natural stop point or a provided stop + sequence, or `length` if the maximum number of tokens specified in the request + was reached. 
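+
+    Callers should check this field before assuming the output is complete,
+    since `length` indicates the completion was likely truncated.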
+ """ + + index: int + """The index of the choice in the list of choices.""" + + logprobs: Optional[ChoiceLogprobs] = None + """Log probability information for the choice.""" + + message: ChoiceMessage + """A chat completion message generated by the model.""" + + +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + +class CompletionCreateResponse(BaseModel): + id: str + """A unique identifier for the chat completion.""" + + choices: List[Choice] + """A list of chat completion choices. + + Can be more than one if `n` is greater than 1. + """ + + created: int + """The Unix timestamp (in seconds) of when the chat completion was created.""" + + model: str + """The model used for the chat completion.""" + + object: Literal["chat.completion"] + """The object type, which is always `chat.completion`.""" + + usage: Optional[Usage] = None + """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/auth/__init__.py b/tests/api_resources/chat/__init__.py similarity index 70% rename from src/gradientai/types/auth/__init__.py rename to tests/api_resources/chat/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/gradientai/types/auth/__init__.py +++ b/tests/api_resources/chat/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py new file mode 100644 index 00000000..17319d86 --- /dev/null +++ b/tests/api_resources/chat/test_completions.py @@ -0,0 +1,184 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.chat import CompletionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCompletions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncCompletions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + 
top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True From 749d04f24b32365d09579489f053b1d17d8ded1d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 23:07:22 +0000 Subject: [PATCH 041/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 25 +- src/gradientai/_client.py | 2 +- .../{api_keys/api_keys_.py => api_keys.py} | 24 +- src/gradientai/resources/api_keys/__init__.py | 19 - src/gradientai/resources/api_keys/api_keys.py | 279 ----------- src/gradientai/resources/models.py | 85 ---- src/gradientai/types/__init__.py | 10 +- src/gradientai/types/api_agreement.py | 17 - .../{api_keys => }/api_key_create_params.py | 0 .../{api_keys => }/api_key_create_response.py | 2 +- .../{api_keys => }/api_key_delete_response.py | 2 +- src/gradientai/types/api_key_list_params.py | 29 +- src/gradientai/types/api_key_list_response.py | 32 +- .../{api_keys => }/api_key_update_params.py | 2 +- .../api_key_update_regenerate_response.py | 2 +- .../{api_keys => }/api_key_update_response.py | 2 +- src/gradientai/types/api_keys/__init__.py | 13 - .../types/api_keys/api_key_list_params.py | 15 - .../types/api_keys/api_key_list_response.py | 18 - src/gradientai/types/api_model.py | 26 +- .../{api_keys => }/api_model_api_key_info.py | 2 +- src/gradientai/types/api_model_version.py | 15 - src/gradientai/types/model.py | 21 - src/gradientai/types/model_list_response.py | 26 +- tests/api_resources/api_keys/__init__.py | 1 - .../api_resources/api_keys/test_api_keys_.py | 446 ------------------ tests/api_resources/test_api_keys.py | 356 +++++++++++++- tests/api_resources/test_models.py | 86 +--- 29 files changed, 433 insertions(+), 1128 deletions(-) rename src/gradientai/resources/{api_keys/api_keys_.py => api_keys.py} (96%) delete mode 100644 src/gradientai/resources/api_keys/__init__.py delete mode 100644 src/gradientai/resources/api_keys/api_keys.py delete mode 100644 src/gradientai/types/api_agreement.py rename src/gradientai/types/{api_keys => }/api_key_create_params.py (100%) rename src/gradientai/types/{api_keys => }/api_key_create_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_delete_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_update_params.py (90%) rename src/gradientai/types/{api_keys => 
}/api_key_update_regenerate_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_update_response.py (90%) delete mode 100644 src/gradientai/types/api_keys/__init__.py delete mode 100644 src/gradientai/types/api_keys/api_key_list_params.py delete mode 100644 src/gradientai/types/api_keys/api_key_list_response.py rename src/gradientai/types/{api_keys => }/api_model_api_key_info.py (93%) delete mode 100644 src/gradientai/types/api_model_version.py delete mode 100644 src/gradientai/types/model.py delete mode 100644 tests/api_resources/api_keys/__init__.py delete mode 100644 tests/api_resources/api_keys/test_api_keys_.py diff --git a/.stats.yml b/.stats.yml index bd458f47..01cc76ec 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 57 +configured_endpoints: 56 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: c880014064b4d19b42254d47f1bb2758 +config_hash: 6abb2ff94db8b1b61321606275ba3e80 diff --git a/api.md b/api.md index ed9acf78..bac163c8 100644 --- a/api.md +++ b/api.md @@ -255,19 +255,7 @@ Methods: Types: ```python -from gradientai.types import APIAgreement, APIModelVersion, APIKeyListResponse -``` - -Methods: - -- client.api_keys.list(\*\*params) -> APIKeyListResponse - -## APIKeys - -Types: - -```python -from gradientai.types.api_keys import ( +from gradientai.types import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -279,11 +267,11 @@ from gradientai.types.api_keys import ( Methods: -- client.api*keys.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api*keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api*keys.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api*keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api*keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.api_keys.list(\*\*params) -> APIKeyListResponse +- client.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Chat @@ -309,5 +297,4 @@ from gradientai.types import Model, ModelListResponse Methods: -- client.models.retrieve(model) -> Model - client.models.list(\*\*params) -> ModelListResponse diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 6927ff10..bec52a23 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -34,10 +34,10 @@ from .resources import chat, agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import 
KnowledgeBasesResource, AsyncKnowledgeBasesResource diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys.py similarity index 96% rename from src/gradientai/resources/api_keys/api_keys_.py rename to src/gradientai/resources/api_keys.py index 7bea219b..be1e346b 100644 --- a/src/gradientai/resources/api_keys/api_keys_.py +++ b/src/gradientai/resources/api_keys.py @@ -4,23 +4,23 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import api_key_list_params, api_key_create_params, api_key_update_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.api_keys.api_key_list_response import APIKeyListResponse -from ...types.api_keys.api_key_create_response import APIKeyCreateResponse -from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse -from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse -from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse +from .._base_client import make_request_options +from ..types.api_key_list_response import APIKeyListResponse +from ..types.api_key_create_response import APIKeyCreateResponse +from ..types.api_key_delete_response import APIKeyDeleteResponse +from ..types.api_key_update_response import APIKeyUpdateResponse +from ..types.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/api_keys/__init__.py b/src/gradientai/resources/api_keys/__init__.py deleted file mode 100644 index ed14565c..00000000 --- a/src/gradientai/resources/api_keys/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py deleted file mode 100644 index ee94a02d..00000000 --- a/src/gradientai/resources/api_keys/api_keys.py +++ /dev/null @@ -1,279 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from . 
import api_keys_ as api_keys -from ...types import api_key_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_key_list_response import APIKeyListResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.APIKeysResource: - return api_keys.APIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResource: - return api_keys.AsyncAPIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.list = to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index d8b6b385..2c7b40ab 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -18,7 +18,6 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..types.model import Model from .._base_client import make_request_options from ..types.model_list_response import ModelListResponse @@ -45,42 +44,6 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - def list( self, *, @@ -178,42 +141,6 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return await self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - async def list( self, *, @@ -295,9 +222,6 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) self.list = to_raw_response_wrapper( models.list, ) @@ -307,9 +231,6 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -319,9 +240,6 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) self.list = to_streamed_response_wrapper( models.list, ) @@ -331,9 +249,6 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 4b12d65c..ddbbe52d 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,13 +2,10 @@ from __future__ import annotations -from .model import Model as Model from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel -from .api_agreement import APIAgreement as APIAgreement from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams -from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams @@ -22,9 +19,15 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse 
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility @@ -44,6 +47,7 @@ from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py deleted file mode 100644 index c4359f1f..00000000 --- a/src/gradientai/types/api_agreement.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIAgreement"] - - -class APIAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/gradientai/types/api_keys/api_key_create_params.py b/src/gradientai/types/api_key_create_params.py similarity index 100% rename from src/gradientai/types/api_keys/api_key_create_params.py rename to src/gradientai/types/api_key_create_params.py diff --git a/src/gradientai/types/api_keys/api_key_create_response.py b/src/gradientai/types/api_key_create_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_create_response.py rename to src/gradientai/types/api_key_create_response.py index 654e9f1e..2d6024cf 100644 --- a/src/gradientai/types/api_keys/api_key_create_response.py +++ b/src/gradientai/types/api_key_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git a/src/gradientai/types/api_keys/api_key_delete_response.py b/src/gradientai/types/api_key_delete_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_delete_response.py rename to src/gradientai/types/api_key_delete_response.py index 4d81d047..d65286c8 100644 --- a/src/gradientai/types/api_keys/api_key_delete_response.py +++ b/src/gradientai/types/api_key_delete_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/api_key_list_params.py index a1ab60dc..11da9398 100644 --- a/src/gradientai/types/api_key_list_params.py +++ b/src/gradientai/types/api_key_list_params.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal, TypedDict +from typing_extensions import TypedDict __all__ = ["APIKeyListParams"] @@ -14,29 +13,3 @@ class APIKeyListParams(TypedDict, total=False): per_page: int """items per 
page.""" - - public_only: bool - """only include models that are publicly available.""" - - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - """include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - """ diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/api_key_list_response.py index 360de7a4..db45102b 100644 --- a/src/gradientai/types/api_key_list_response.py +++ b/src/gradientai/types/api_key_list_response.py @@ -1,42 +1,18 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Optional -from datetime import datetime from .._models import BaseModel -from .api_agreement import APIAgreement from .agents.api_meta import APIMeta from .agents.api_links import APILinks -from .api_model_version import APIModelVersion +from .api_model_api_key_info import APIModelAPIKeyInfo -__all__ = ["APIKeyListResponse", "Model"] - - -class Model(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None +__all__ = ["APIKeyListResponse"] class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + links: Optional[APILinks] = None meta: Optional[APIMeta] = None - - models: Optional[List[Model]] = None diff --git a/src/gradientai/types/api_keys/api_key_update_params.py b/src/gradientai/types/api_key_update_params.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_update_params.py rename to src/gradientai/types/api_key_update_params.py index 23c1c0b9..1678304f 100644 --- a/src/gradientai/types/api_keys/api_key_update_params.py +++ b/src/gradientai/types/api_key_update_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from ..._utils import PropertyInfo +from .._utils import PropertyInfo __all__ = ["APIKeyUpdateParams"] diff --git a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py b/src/gradientai/types/api_key_update_regenerate_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_update_regenerate_response.py rename to src/gradientai/types/api_key_update_regenerate_response.py index 44a316dc..eaf19b6e 100644 --- a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py +++ b/src/gradientai/types/api_key_update_regenerate_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateRegenerateResponse"] diff --git 
a/src/gradientai/types/api_keys/api_key_update_response.py b/src/gradientai/types/api_key_update_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_update_response.py rename to src/gradientai/types/api_key_update_response.py index 3671addf..a8d79898 100644 --- a/src/gradientai/types/api_keys/api_key_update_response.py +++ b/src/gradientai/types/api_key_update_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git a/src/gradientai/types/api_keys/__init__.py b/src/gradientai/types/api_keys/__init__.py deleted file mode 100644 index c3cbcd6d..00000000 --- a/src/gradientai/types/api_keys/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_keys/api_key_list_params.py b/src/gradientai/types/api_keys/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/gradientai/types/api_keys/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/gradientai/types/api_keys/api_key_list_response.py b/src/gradientai/types/api_keys/api_key_list_response.py deleted file mode 100644 index 535e2f96..00000000 --- a/src/gradientai/types/api_keys/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index d680a638..82120454 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -5,14 +5,30 @@ from typing_extensions import Literal from .._models import BaseModel -from .api_agreement import APIAgreement -from .api_model_version import APIModelVersion -__all__ = ["APIModel"] +__all__ = ["APIModel", "Agreement", "Version"] + + +class Agreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Version(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None class APIModel(BaseModel): - agreement: Optional[APIAgreement] = None + agreement: Optional[Agreement] = None created_at: Optional[datetime] = None @@ -54,4 +70,4 @@ class APIModel(BaseModel): uuid: Optional[str] = None - version: Optional[APIModelVersion] = None + version: Optional[Version] = None diff --git a/src/gradientai/types/api_keys/api_model_api_key_info.py b/src/gradientai/types/api_model_api_key_info.py similarity index 93% rename from src/gradientai/types/api_keys/api_model_api_key_info.py rename to src/gradientai/types/api_model_api_key_info.py index bf354a47..c05c9cef 100644 --- a/src/gradientai/types/api_keys/api_model_api_key_info.py +++ b/src/gradientai/types/api_model_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["APIModelAPIKeyInfo"] diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py deleted file mode 100644 index 2e118632..00000000 --- a/src/gradientai/types/api_model_version.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIModelVersion"] - - -class APIModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py deleted file mode 100644 index 2631ee8d..00000000 --- a/src/gradientai/types/model.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["Model"] - - -class Model(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 1d0e5eee..29d6a34e 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -4,16 +4,32 @@ from datetime import datetime from .._models import BaseModel -from .api_agreement import APIAgreement from .agents.api_meta import APIMeta from .agents.api_links import APILinks -from .api_model_version import APIModelVersion -__all__ = ["ModelListResponse", "Model"] +__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"] + + +class ModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class ModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None class Model(BaseModel): - agreement: Optional[APIAgreement] = None + agreement: Optional[ModelAgreement] = None created_at: Optional[datetime] = None @@ -31,7 +47,7 @@ class Model(BaseModel): uuid: Optional[str] = None - version: Optional[APIModelVersion] = None + version: Optional[ModelVersion] = None class ModelListResponse(BaseModel): diff --git a/tests/api_resources/api_keys/__init__.py b/tests/api_resources/api_keys/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/api_keys/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/api_keys/test_api_keys_.py b/tests/api_resources/api_keys/test_api_keys_.py deleted file mode 100644 index 01e8dcfa..00000000 --- a/tests/api_resources/api_keys/test_api_keys_.py +++ /dev/null @@ -1,446 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.api_keys import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyUpdateRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`path_api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_regenerate(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_regenerate(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_streaming_response_update_regenerate(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_regenerate(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received 
''"): - await async_client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/test_api_keys.py index fa1895c9..a06e0b3a 100644 --- a/tests/api_resources/test_api_keys.py +++ b/tests/api_resources/test_api_keys.py @@ -9,7 +9,13 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import APIKeyListResponse +from gradientai.types import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, + APIKeyUpdateRegenerateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,6 +23,94 @@ class TestAPIKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + api_key = client.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + api_key = client.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -29,8 +123,6 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: api_key = client.api_keys.list( page=0, per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -56,10 +148,182 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + api_key = client.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_regenerate(self, client: GradientAI) -> None: + api_key = client.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_regenerate(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.with_raw_response.update_regenerate( + "", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -72,8 +336,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) api_key = await async_client.api_keys.list( page=0, per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -98,3 +360,87 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(APIKeyListResponse, api_key, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty 
value for `api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.update_regenerate( + "", + ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 04133ed4..946b2eb9 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import Model, ModelListResponse +from gradientai.types import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,48 +17,6 @@ class TestModels: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - model = client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value 
for `model` but received ''"): - client.models.with_raw_response.retrieve( - "", - ) - @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -102,48 +60,6 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - await async_client.models.with_raw_response.retrieve( - "", - ) - @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: From f4f9f6c5c4a265a37b10ebd6404b7699bfb19358 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 02:56:29 +0000 Subject: [PATCH 042/200] docs(client): fix httpx.Timeout documentation reference --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 465a7a0c..36edcfbd 100644 --- a/README.md +++ b/README.md @@ -166,7 +166,7 @@ client.with_options(max_retries=5).agents.versions.list( ### Timeouts By default requests time out after 1 minute. 
You can configure this with a `timeout` option, -which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: +which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python from gradientai import GradientAI From 0508acf1a1d2282867f17b89f851ef783f3728d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:50:33 +0000 Subject: [PATCH 043/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 39 ++- src/gradientai/_client.py | 76 +++--- src/gradientai/resources/__init__.py | 26 +- .../resources/inference/__init__.py | 47 ++++ .../resources/{ => inference}/api_keys.py | 24 +- .../resources/inference/inference.py | 134 +++++++++++ src/gradientai/resources/inference/models.py | 226 ++++++++++++++++++ src/gradientai/types/__init__.py | 10 +- src/gradientai/types/inference/__init__.py | 15 ++ .../{ => inference}/api_key_create_params.py | 0 .../api_key_create_response.py | 2 +- .../api_key_delete_response.py | 2 +- .../{ => inference}/api_key_list_params.py | 0 .../{ => inference}/api_key_list_response.py | 6 +- .../{ => inference}/api_key_update_params.py | 2 +- .../api_key_update_regenerate_response.py | 2 +- .../api_key_update_response.py | 2 +- .../{ => inference}/api_model_api_key_info.py | 2 +- .../types/inference/model_list_response.py | 28 +++ .../inference/model_retrieve_response.py | 21 ++ src/gradientai/types/model.py | 48 ++++ src/gradientai/types/model_list_response.py | 44 +--- tests/api_resources/inference/__init__.py | 1 + .../{ => inference}/test_api_keys.py | 86 +++---- tests/api_resources/inference/test_models.py | 162 +++++++++++++ 26 files changed, 829 insertions(+), 180 deletions(-) create mode 100644 src/gradientai/resources/inference/__init__.py rename src/gradientai/resources/{ => inference}/api_keys.py (96%) create mode 100644 src/gradientai/resources/inference/inference.py create mode 100644 src/gradientai/resources/inference/models.py create mode 100644 src/gradientai/types/inference/__init__.py rename src/gradientai/types/{ => inference}/api_key_create_params.py (100%) rename src/gradientai/types/{ => inference}/api_key_create_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_delete_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_list_params.py (100%) rename src/gradientai/types/{ => inference}/api_key_list_response.py (77%) rename src/gradientai/types/{ => inference}/api_key_update_params.py (90%) rename src/gradientai/types/{ => inference}/api_key_update_regenerate_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_update_response.py (90%) rename src/gradientai/types/{ => inference}/api_model_api_key_info.py (93%) create mode 100644 src/gradientai/types/inference/model_list_response.py create mode 100644 src/gradientai/types/inference/model_retrieve_response.py create mode 100644 src/gradientai/types/model.py create mode 100644 tests/api_resources/inference/__init__.py rename tests/api_resources/{ => inference}/test_api_keys.py (82%) create mode 100644 tests/api_resources/inference/test_models.py diff --git a/.stats.yml b/.stats.yml index 01cc76ec..3f5210da 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 56 +configured_endpoints: 58 openapi_spec_url: 
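As a companion to the corrected `httpx.Timeout` link above, a minimal sketch of both accepted timeout forms, assuming only the client surface the README already documents (`timeout=` at construction and the `with_options` helper shown in the retry example):

```python
import httpx

from gradientai import GradientAI

# A bare float caps the whole request at that many seconds.
client = GradientAI(timeout=20.0)

# An httpx.Timeout sets per-phase limits (connect, read, write, pool).
client = GradientAI(timeout=httpx.Timeout(60.0, connect=5.0, read=10.0))

# Per-request override via with_options; the target call is illustrative.
client.with_options(timeout=5.0).inference.models.list()
```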
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 6abb2ff94db8b1b61321606275ba3e80 +config_hash: 01ce4f461115cf14fd2b26a7d08a3a6a diff --git a/api.md b/api.md index bac163c8..3f713c24 100644 --- a/api.md +++ b/api.md @@ -250,12 +250,28 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse -# APIKeys +# Chat + +## Completions Types: ```python -from gradientai.types import ( +from gradientai.types.chat import CompletionCreateResponse +``` + +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + +# Inference + +## APIKeys + +Types: + +```python +from gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -267,25 +283,24 @@ from gradientai.types import ( Methods: -- client.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse -# Chat - -## Completions +## Models Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from gradientai.types.inference import ModelRetrieveResponse, ModelListResponse ``` Methods: -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.inference.models.retrieve(model) -> ModelRetrieveResponse +- client.inference.models.list() -> ModelListResponse # Models diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index bec52a23..0a5eb9a1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,13 +31,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -139,18 +139,18 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self) - @cached_property - def api_keys(self) -> 
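To see what the api.md changes above amount to at a call site, a short before/after sketch; the method names and parameters are taken directly from the listing, the commented-out form is the pre-patch path, and the client is assumed to pick up credentials from the environment:

```python
from gradientai import GradientAI

client = GradientAI()  # assumes credentials are supplied via the environment

# Pre-patch, key management hung off a top-level resource:
# keys = client.api_keys.list(page=1, per_page=10)

# Post-patch, the same endpoints live under the inference namespace,
# alongside the newly split-out inference model endpoints:
keys = client.inference.api_keys.list(page=1, per_page=10)
models = client.inference.models.list()
```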
APIKeysResource: - from .resources.api_keys import APIKeysResource - - return APIKeysResource(self) - @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource return ChatResource(self) + @cached_property + def inference(self) -> InferenceResource: + from .resources.inference import InferenceResource + + return InferenceResource(self) + @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -358,18 +358,18 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self) - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - from .resources.api_keys import AsyncAPIKeysResource - - return AsyncAPIKeysResource(self) - @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource return AsyncChatResource(self) + @cached_property + def inference(self) -> AsyncInferenceResource: + from .resources.inference import AsyncInferenceResource + + return AsyncInferenceResource(self) + @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -527,18 +527,18 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - from .resources.api_keys import APIKeysResourceWithRawResponse - - return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse return ChatResourceWithRawResponse(self._client.chat) + @cached_property + def inference(self) -> inference.InferenceResourceWithRawResponse: + from .resources.inference import InferenceResourceWithRawResponse + + return InferenceResourceWithRawResponse(self._client.inference) + @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -582,18 +582,18 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse - - return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property + def inference(self) -> inference.AsyncInferenceResourceWithRawResponse: + from .resources.inference import AsyncInferenceResourceWithRawResponse + + return AsyncInferenceResourceWithRawResponse(self._client.inference) + @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -637,18 +637,18 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - from .resources.api_keys import APIKeysResourceWithStreamingResponse - - return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> 
chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def inference(self) -> inference.InferenceResourceWithStreamingResponse: + from .resources.inference import InferenceResourceWithStreamingResponse + + return InferenceResourceWithStreamingResponse(self._client.inference) + @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -692,18 +692,18 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse - - return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: + from .resources.inference import AsyncInferenceResourceWithStreamingResponse + + return AsyncInferenceResourceWithStreamingResponse(self._client.inference) + @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index de26662c..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -32,13 +32,13 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, +from .inference import ( + InferenceResource, + AsyncInferenceResource, + InferenceResourceWithRawResponse, + AsyncInferenceResourceWithRawResponse, + InferenceResourceWithStreamingResponse, + AsyncInferenceResourceWithStreamingResponse, ) from .providers import ( ProvidersResource, @@ -96,18 +96,18 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", + "InferenceResource", + "AsyncInferenceResource", + "InferenceResourceWithRawResponse", + "AsyncInferenceResourceWithRawResponse", + "InferenceResourceWithStreamingResponse", + "AsyncInferenceResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/inference/__init__.py b/src/gradientai/resources/inference/__init__.py new file mode 100644 index 00000000..0e5631ce --- /dev/null +++ b/src/gradientai/resources/inference/__init__.py @@ -0,0 +1,47 @@ +# File 
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .inference import ( + InferenceResource, + AsyncInferenceResource, + InferenceResourceWithRawResponse, + AsyncInferenceResourceWithRawResponse, + InferenceResourceWithStreamingResponse, + AsyncInferenceResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", + "InferenceResource", + "AsyncInferenceResource", + "InferenceResourceWithRawResponse", + "AsyncInferenceResourceWithRawResponse", + "InferenceResourceWithStreamingResponse", + "AsyncInferenceResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/api_keys.py b/src/gradientai/resources/inference/api_keys.py similarity index 96% rename from src/gradientai/resources/api_keys.py rename to src/gradientai/resources/inference/api_keys.py index be1e346b..c00212f8 100644 --- a/src/gradientai/resources/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -4,23 +4,23 @@ import httpx -from ..types import api_key_list_params, api_key_create_params, api_key_update_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.api_key_list_response import APIKeyListResponse -from ..types.api_key_create_response import APIKeyCreateResponse -from ..types.api_key_delete_response import APIKeyDeleteResponse -from ..types.api_key_update_response import APIKeyUpdateResponse -from ..types.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse +from ..._base_client import make_request_options +from ...types.inference import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.inference.api_key_list_response import APIKeyListResponse +from ...types.inference.api_key_create_response import APIKeyCreateResponse +from ...types.inference.api_key_delete_response import APIKeyDeleteResponse +from ...types.inference.api_key_update_response import APIKeyUpdateResponse +from ...types.inference.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git 
a/src/gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py new file mode 100644 index 00000000..325353dc --- /dev/null +++ b/src/gradientai/resources/inference/inference.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["InferenceResource", "AsyncInferenceResource"] + + +class InferenceResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + + @cached_property + def models(self) -> ModelsResource: + return ModelsResource(self._client) + + @cached_property + def with_raw_response(self) -> InferenceResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return InferenceResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return InferenceResourceWithStreamingResponse(self) + + +class AsyncInferenceResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + + @cached_property + def models(self) -> AsyncModelsResource: + return AsyncModelsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncInferenceResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncInferenceResourceWithStreamingResponse(self) + + +class InferenceResourceWithRawResponse: + def __init__(self, inference: InferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._inference.api_keys) + + @cached_property + def models(self) -> ModelsResourceWithRawResponse: + return ModelsResourceWithRawResponse(self._inference.models) + + +class AsyncInferenceResourceWithRawResponse: + def __init__(self, inference: AsyncInferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._inference.api_keys) + + @cached_property + def models(self) -> AsyncModelsResourceWithRawResponse: + return AsyncModelsResourceWithRawResponse(self._inference.models) + + +class InferenceResourceWithStreamingResponse: + def __init__(self, inference: InferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._inference.api_keys) + + @cached_property + def models(self) -> ModelsResourceWithStreamingResponse: + return ModelsResourceWithStreamingResponse(self._inference.models) + + +class AsyncInferenceResourceWithStreamingResponse: + def __init__(self, inference: AsyncInferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._inference.api_keys) + + @cached_property + def models(self) -> AsyncModelsResourceWithStreamingResponse: + return AsyncModelsResourceWithStreamingResponse(self._inference.models) diff --git a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py new file mode 100644 index 00000000..c36f6cee --- /dev/null +++ b/src/gradientai/resources/inference/models.py @@ -0,0 +1,226 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.inference.model_list_response import ModelListResponse +from ...types.inference.model_retrieve_response import ModelRetrieveResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return ModelsResourceWithStreamingResponse(self) + + def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelRetrieveResponse: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return self._get( + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + +class AsyncModelsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncModelsResourceWithStreamingResponse(self) + + async def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelRetrieveResponse: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return await self._get( + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return await self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + +class ModelsResourceWithRawResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) + self.list = to_raw_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithRawResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) + self.list = async_to_raw_response_wrapper( + models.list, + ) + + +class ModelsResourceWithStreamingResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) + self.list = to_streamed_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithStreamingResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_streamed_response_wrapper( + models.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + models.list, + ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index ddbbe52d..091fe110 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .model import Model as Model from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_indexing_job import APIIndexingJob as APIIndexingJob @@ -12,22 +13,14 @@ from .agent_create_params import AgentCreateParams as AgentCreateParams from .agent_list_response import AgentListResponse as AgentListResponse from 
.agent_update_params import AgentUpdateParams as AgentUpdateParams -from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod from .region_list_response import RegionListResponse as RegionListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility @@ -47,7 +40,6 @@ from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, diff --git a/src/gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py new file mode 100644 index 00000000..d1ccb71b --- /dev/null +++ b/src/gradientai/types/inference/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
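A note on the import style in this and the other generated `__init__.py` modules: the redundant `Name as Name` aliases are the conventional way to mark names as intentional public re-exports, so that type checkers applying PEP 484 export rules (mypy with `--no-implicit-reexport`, pyright in strict mode on a `py.typed` package) do not treat them as private. A minimal sketch of the idiom, using a hypothetical package that is not part of this patch:

```python
# Hypothetical two-module package, for illustration only.

# pkg/_models.py
class Thing:
    """An internal model type."""

# pkg/__init__.py
from ._models import Thing as Thing  # "X as X" marks Thing as a public re-export

# Callers can now write `from pkg import Thing` without strict-mode type
# checkers flagging an implicit re-export from a private module.
```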
+ +from __future__ import annotations + +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .model_list_response import ModelListResponse as ModelListResponse +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/gradientai/types/api_key_create_params.py rename to src/gradientai/types/inference/api_key_create_params.py diff --git a/src/gradientai/types/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py similarity index 90% rename from src/gradientai/types/api_key_create_response.py rename to src/gradientai/types/inference/api_key_create_response.py index 2d6024cf..654e9f1e 100644 --- a/src/gradientai/types/api_key_create_response.py +++ b/src/gradientai/types/inference/api_key_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git a/src/gradientai/types/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py similarity index 90% rename from src/gradientai/types/api_key_delete_response.py rename to src/gradientai/types/inference/api_key_delete_response.py index d65286c8..4d81d047 100644 --- a/src/gradientai/types/api_key_delete_response.py +++ b/src/gradientai/types/inference/api_key_delete_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/gradientai/types/api_key_list_params.py rename to src/gradientai/types/inference/api_key_list_params.py diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py similarity index 77% rename from src/gradientai/types/api_key_list_response.py rename to src/gradientai/types/inference/api_key_list_response.py index db45102b..535e2f96 100644 --- a/src/gradientai/types/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -2,9 +2,9 @@ from typing import List, Optional -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from ..._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/gradientai/types/api_key_update_params.py 
b/src/gradientai/types/inference/api_key_update_params.py similarity index 90% rename from src/gradientai/types/api_key_update_params.py rename to src/gradientai/types/inference/api_key_update_params.py index 1678304f..23c1c0b9 100644 --- a/src/gradientai/types/api_key_update_params.py +++ b/src/gradientai/types/inference/api_key_update_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from .._utils import PropertyInfo +from ..._utils import PropertyInfo __all__ = ["APIKeyUpdateParams"] diff --git a/src/gradientai/types/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py similarity index 90% rename from src/gradientai/types/api_key_update_regenerate_response.py rename to src/gradientai/types/inference/api_key_update_regenerate_response.py index eaf19b6e..44a316dc 100644 --- a/src/gradientai/types/api_key_update_regenerate_response.py +++ b/src/gradientai/types/inference/api_key_update_regenerate_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateRegenerateResponse"] diff --git a/src/gradientai/types/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py similarity index 90% rename from src/gradientai/types/api_key_update_response.py rename to src/gradientai/types/inference/api_key_update_response.py index a8d79898..3671addf 100644 --- a/src/gradientai/types/api_key_update_response.py +++ b/src/gradientai/types/inference/api_key_update_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git a/src/gradientai/types/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py similarity index 93% rename from src/gradientai/types/api_model_api_key_info.py rename to src/gradientai/types/inference/api_model_api_key_info.py index c05c9cef..bf354a47 100644 --- a/src/gradientai/types/api_model_api_key_info.py +++ b/src/gradientai/types/inference/api_model_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIModelAPIKeyInfo"] diff --git a/src/gradientai/types/inference/model_list_response.py b/src/gradientai/types/inference/model_list_response.py new file mode 100644 index 00000000..64f1e5b4 --- /dev/null +++ b/src/gradientai/types/inference/model_list_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
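Before the field-by-field definition of the new response type, it may help to see how it surfaces to callers. A minimal usage sketch, not part of the patch; it assumes the `GradientAI` client accepts an `api_key` keyword argument (client construction is not shown in this diff, though this is the usual Stainless pattern):

```python
from gradientai import GradientAI

client = GradientAI(api_key="...")  # assumed constructor argument

# Hits https://inference.do-ai.run/v1/models unless the base URL is overridden.
models = client.inference.models.list()
for model in models.data:
    print(model.id, model.owned_by)

# The wrapper classes above expose the same call with raw-response access;
# parse() is how the tests below read the wrapped response body.
raw = client.inference.models.with_raw_response.list()
parsed = raw.parse()
```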
+ +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ModelListResponse", "Data"] + + +class Data(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" + + +class ModelListResponse(BaseModel): + data: List[Data] + + object: Literal["list"] diff --git a/src/gradientai/types/inference/model_retrieve_response.py b/src/gradientai/types/inference/model_retrieve_response.py new file mode 100644 index 00000000..1b8fca25 --- /dev/null +++ b/src/gradientai/types/inference/model_retrieve_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ModelRetrieveResponse"] + + +class ModelRetrieveResponse(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py new file mode 100644 index 00000000..cba51b07 --- /dev/null +++ b/src/gradientai/types/model.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["Model", "Agreement", "Version"] + + +class Agreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Version(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class Model(BaseModel): + agreement: Optional[Agreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[Version] = None diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 29d6a34e..93d9ae04 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,53 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import List, Optional -from datetime import datetime +from .model import Model from .._models import BaseModel from .agents.api_meta import APIMeta from .agents.api_links import APILinks -__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"] - - -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): diff --git a/tests/api_resources/inference/__init__.py b/tests/api_resources/inference/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/inference/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py similarity index 82% rename from tests/api_resources/test_api_keys.py rename to tests/api_resources/inference/test_api_keys.py index a06e0b3a..d84572c7 100644 --- a/tests/api_resources/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,13 +26,13 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - api_key = client.api_keys.create() + api_key = client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.create( + api_key = client.inference.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.create() + response = client.inference.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.create() as response: + with client.inference.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> 
None: - api_key = client.api_keys.update( + api_key = client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -70,7 +70,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.update( + api_key = client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", body_api_key_uuid="api_key_uuid", name="name", @@ -80,7 +80,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.update( + response = client.inference.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -92,7 +92,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.update( + with client.inference.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -107,20 +107,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.api_keys.with_raw_response.update( + client.inference.api_keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - api_key = client.api_keys.list() + api_key = client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.list( + api_key = client.inference.api_keys.list( page=0, per_page=0, ) @@ -129,7 +129,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.list() + response = client.inference.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -139,7 +139,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.list() as response: + with client.inference.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -151,7 +151,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - api_key = client.api_keys.delete( + api_key = client.inference.api_keys.delete( "api_key_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.delete( + response = 
client.inference.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -171,7 +171,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.delete( + with client.inference.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -186,14 +186,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.with_raw_response.delete( + client.inference.api_keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_regenerate(self, client: GradientAI) -> None: - api_key = client.api_keys.update_regenerate( + api_key = client.inference.api_keys.update_regenerate( "api_key_uuid", ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -201,7 +201,7 @@ def test_method_update_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_regenerate(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.update_regenerate( + response = client.inference.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.update_regenerate( + with client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: @parametrize def test_path_params_update_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.with_raw_response.update_regenerate( + client.inference.api_keys.with_raw_response.update_regenerate( "", ) @@ -239,13 +239,13 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.create() + api_key = await async_client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.create( + api_key = await async_client.inference.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -253,7 +253,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.create() + response = await async_client.inference.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -263,7 +263,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize 
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.create() as response: + async with async_client.inference.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -275,7 +275,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update( + api_key = await async_client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -283,7 +283,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update( + api_key = await async_client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", body_api_key_uuid="api_key_uuid", name="name", @@ -293,7 +293,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.update( + response = await async_client.inference.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -305,7 +305,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.update( + async with async_client.inference.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -320,20 +320,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.with_raw_response.update( + await async_client.inference.api_keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.list() + api_key = await async_client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.list( + api_key = await async_client.inference.api_keys.list( page=0, per_page=0, ) @@ -342,7 +342,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.list() + response = await async_client.inference.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -352,7 +352,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) 
-> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.list() as response: + async with async_client.inference.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -364,7 +364,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.delete( + api_key = await async_client.inference.api_keys.delete( "api_key_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -372,7 +372,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.delete( + response = await async_client.inference.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -384,7 +384,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.delete( + async with async_client.inference.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -399,14 +399,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.with_raw_response.delete( + await async_client.inference.api_keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update_regenerate( + api_key = await async_client.inference.api_keys.update_regenerate( "api_key_uuid", ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -414,7 +414,7 @@ async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.update_regenerate( + response = await async_client.inference.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -426,7 +426,7 @@ async def test_raw_response_update_regenerate(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.update_regenerate( + async with async_client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: assert not response.is_closed @@ -441,6 +441,6 @@ async def test_streaming_response_update_regenerate(self, async_client: AsyncGra @parametrize async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - 
await async_client.api_keys.with_raw_response.update_regenerate( + await async_client.inference.api_keys.with_raw_response.update_regenerate( "", ) diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py new file mode 100644 index 00000000..7fb735fb --- /dev/null +++ b/tests/api_resources/inference/test_models.py @@ -0,0 +1,162 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.inference import ModelListResponse, ModelRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + model = client.inference.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.inference.models.with_raw_response.retrieve( + "llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.inference.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.inference.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.inference.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.inference.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.inference.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModels: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + model = await async_client.inference.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.inference.models.with_raw_response.retrieve( + "llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.inference.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.inference.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.inference.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.inference.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.inference.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True From a08b39654b06df5ed6b1ff3c80e28ba4f18e3861 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:51:29 +0000 Subject: [PATCH 044/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 4 ++-- src/gradientai/resources/inference/models.py | 10 +++++----- src/gradientai/types/inference/__init__.py | 2 +- .../{model_retrieve_response.py => model.py} | 4 ++-- .../types/inference/model_list_response.py | 19 +++---------------- tests/api_resources/inference/test_models.py | 14 +++++++------- 7 files changed, 21 insertions(+), 34 deletions(-) rename src/gradientai/types/inference/{model_retrieve_response.py => model.py} (86%) diff --git a/.stats.yml b/.stats.yml index 3f5210da..b82a0dc7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 01ce4f461115cf14fd2b26a7d08a3a6a +config_hash: f8b4e76be8bb430b0a00d2fd04d71615 diff --git a/api.md b/api.md index 3f713c24..adc47296 100644 --- a/api.md +++ b/api.md @@ -294,12 +294,12 @@ Methods: Types: ```python -from gradientai.types.inference import ModelRetrieveResponse, ModelListResponse +from gradientai.types.inference import Model, ModelListResponse ``` Methods: -- client.inference.models.retrieve(model) -> ModelRetrieveResponse +- client.inference.models.retrieve(model) -> Model - client.inference.models.list() -> ModelListResponse # Models diff --git a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py index c36f6cee..da327695 100644 --- a/src/gradientai/resources/inference/models.py +++ b/src/gradientai/resources/inference/models.py @@ -14,8 +14,8 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from ...types.inference.model import Model from ...types.inference.model_list_response import ModelListResponse -from ...types.inference.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -50,7 +50,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. @@ -73,7 +73,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ModelRetrieveResponse, + cast_to=Model, ) def list( @@ -129,7 +129,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
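The remaining hunks repeat the same `ModelRetrieveResponse` → `Model` substitution for the async resource and the type re-exports, so for callers only the annotation moves. A short sketch of the resulting typed usage, under the same assumed client construction as above:

```python
from gradientai import GradientAI
from gradientai.types.inference import Model

client = GradientAI(api_key="...")  # assumed constructor argument, as above

# After this commit, retrieve() is annotated to return the shared Model type;
# runtime behaviour and the wire format are unchanged.
model: Model = client.inference.models.retrieve("llama3-8b-instruct")
print(model.id, model.created, model.owned_by)
```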
@@ -152,7 +152,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ModelRetrieveResponse, + cast_to=Model, ) async def list( diff --git a/src/gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py index d1ccb71b..829340d7 100644 --- a/src/gradientai/types/inference/__init__.py +++ b/src/gradientai/types/inference/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .model import Model as Model from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams @@ -11,5 +12,4 @@ from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/inference/model_retrieve_response.py b/src/gradientai/types/inference/model.py similarity index 86% rename from src/gradientai/types/inference/model_retrieve_response.py rename to src/gradientai/types/inference/model.py index 1b8fca25..ed8843e8 100644 --- a/src/gradientai/types/inference/model_retrieve_response.py +++ b/src/gradientai/types/inference/model.py @@ -4,10 +4,10 @@ from ..._models import BaseModel -__all__ = ["ModelRetrieveResponse"] +__all__ = ["Model"] -class ModelRetrieveResponse(BaseModel): +class Model(BaseModel): id: str """The model identifier, which can be referenced in the API endpoints.""" diff --git a/src/gradientai/types/inference/model_list_response.py b/src/gradientai/types/inference/model_list_response.py index 64f1e5b4..01bf3b62 100644 --- a/src/gradientai/types/inference/model_list_response.py +++ b/src/gradientai/types/inference/model_list_response.py @@ -3,26 +3,13 @@ from typing import List from typing_extensions import Literal +from .model import Model from ..._models import BaseModel -__all__ = ["ModelListResponse", "Data"] - - -class Data(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - data: List[Data] + data: List[Model] object: Literal["list"] diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py index 7fb735fb..936801cb 100644 --- a/tests/api_resources/inference/test_models.py +++ b/tests/api_resources/inference/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import ModelListResponse, ModelRetrieveResponse +from gradientai.types.inference import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -23,7 +23,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: model = client.inference.models.retrieve( "llama3-8b-instruct", ) - 
assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+        assert_matches_type(Model, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
@@ -35,7 +35,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         model = response.parse()
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+        assert_matches_type(Model, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
@@ -47,7 +47,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             model = response.parse()
-            assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+            assert_matches_type(Model, model, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
@@ -97,7 +97,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
         model = await async_client.inference.models.retrieve(
             "llama3-8b-instruct",
         )
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+        assert_matches_type(Model, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
@@ -109,7 +109,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         model = await response.parse()
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+        assert_matches_type(Model, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
@@ -121,7 +121,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             model = await response.parse()
-            assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+            assert_matches_type(Model, model, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 

From f46002accdc36d983ea6da8e6aff7815c9b034a3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 19 Jun 2025 10:52:47 +0000
Subject: [PATCH 045/200] feat(api): update via SDK Studio

---
 .stats.yml                                  |   2 +-
 api.md                                      |   3 +-
 src/gradientai/types/__init__.py            |   1 -
 src/gradientai/types/agent_list_response.py | 139 +++++++++++++++++-
 src/gradientai/types/api_agent.py           | 139 +++++++++++++++++-
 src/gradientai/types/api_model.py           |  27 +---
 .../types/api_openai_api_key_info.py        |  70 ++++++++-
 src/gradientai/types/model.py               |  48 ------
 src/gradientai/types/model_list_response.py |   4 +-
 9 files changed, 344 insertions(+), 89 deletions(-)
 delete mode 100644 src/gradientai/types/model.py

diff --git a/.stats.yml b/.stats.yml
index b82a0dc7..8f85d58c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 58
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
 openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: f8b4e76be8bb430b0a00d2fd04d71615
+config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46
diff --git a/api.md b/api.md
index adc47296..2376a11f 100644
--- a/api.md
+++ b/api.md
@@ -8,7 +8,6 @@ from gradientai.types import (
     APIAgentAPIKeyInfo,
     APIAnthropicAPIKeyInfo,
     APIDeploymentVisibility,
-    APIModel,
     APIOpenAIAPIKeyInfo,
     APIRetrievalMethod,
     AgentCreateResponse,
@@ -307,7 +306,7 @@ Methods:
 Types:
 
 ```python
-from gradientai.types import Model, ModelListResponse
+from gradientai.types import APIModel, ModelListResponse
 ```
 
 Methods:
diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 091fe110..5ee961c6 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 
-from .model import Model as Model
 from .api_agent import APIAgent as APIAgent
 from .api_model import APIModel as APIModel
 from .api_indexing_job import APIIndexingJob as APIIndexingJob
diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py
index 4cedbb39..6af9cd51 100644
--- a/src/gradientai/types/agent_list_response.py
+++ b/src/gradientai/types/agent_list_response.py
@@ -5,7 +5,6 @@
 from typing_extensions import Literal
 
 from .._models import BaseModel
-from .api_model import APIModel
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
 from .api_knowledge_base import APIKnowledgeBase
@@ -18,8 +17,14 @@
     "AgentChatbot",
     "AgentChatbotIdentifier",
     "AgentDeployment",
+    "AgentModel",
+    "AgentModelAgreement",
+    "AgentModelVersion",
     "AgentTemplate",
     "AgentTemplateGuardrail",
+    "AgentTemplateModel",
+    "AgentTemplateModelAgreement",
+    "AgentTemplateModelVersion",
 ]
 
 
@@ -69,12 +74,140 @@ class AgentDeployment(BaseModel):
     visibility: Optional[APIDeploymentVisibility] = None
 
 
+class AgentModelAgreement(BaseModel):
+    description: Optional[str] = None
+
+    name: Optional[str] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class AgentModelVersion(BaseModel):
+    major: Optional[int] = None
+
+    minor: Optional[int] = None
+
+    patch: Optional[int] = None
+
+
+class AgentModel(BaseModel):
+    agreement: Optional[AgentModelAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    inference_name: Optional[str] = None
+
+    inference_version: Optional[str] = None
+
+    is_foundational: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+        None
+    )
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    usecases: Optional[
+        List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+    ] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[AgentModelVersion] = None
+
+
 class AgentTemplateGuardrail(BaseModel):
     priority: Optional[int] = None
 
     uuid: Optional[str] = None
 
 
+class AgentTemplateModelAgreement(BaseModel):
+    description: Optional[str] = None
+
+    name: Optional[str] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class AgentTemplateModelVersion(BaseModel):
+    major: Optional[int] = None
+
+    minor: Optional[int] = None
+
+    patch: Optional[int] = None
+
+
+class AgentTemplateModel(BaseModel):
+    agreement: Optional[AgentTemplateModelAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    inference_name: Optional[str] = None
+
+    inference_version: Optional[str] = None
+
+    is_foundational: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+        None
+    )
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    usecases: Optional[
+        List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+    ] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[AgentTemplateModelVersion] = None
+
+
 class AgentTemplate(BaseModel):
     created_at: Optional[datetime] = None
@@ -92,7 +225,7 @@ class AgentTemplate(BaseModel):
 
     max_tokens: Optional[int] = None
 
-    model: Optional[APIModel] = None
+    model: Optional[AgentTemplateModel] = None
 
     name: Optional[str] = None
 
@@ -143,7 +276,7 @@ class Agent(BaseModel):
     response.
     """
 
-    model: Optional[APIModel] = None
+    model: Optional[AgentModel] = None
 
     name: Optional[str] = None
 
diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py
index 0a8df679..3eb01fc7 100644
--- a/src/gradientai/types/api_agent.py
+++ b/src/gradientai/types/api_agent.py
@@ -7,7 +7,6 @@
 from typing_extensions import Literal
 
 from .._models import BaseModel
-from .api_model import APIModel
 from .api_knowledge_base import APIKnowledgeBase
 from .api_retrieval_method import APIRetrievalMethod
 from .api_agent_api_key_info import APIAgentAPIKeyInfo
@@ -23,8 +22,14 @@
     "Deployment",
     "Function",
     "Guardrail",
+    "Model",
+    "ModelAgreement",
+    "ModelVersion",
     "Template",
     "TemplateGuardrail",
+    "TemplateModel",
+    "TemplateModelAgreement",
+    "TemplateModelVersion",
 ]
 
 
@@ -139,12 +144,140 @@ class Guardrail(BaseModel):
     uuid: Optional[str] = None
 
 
+class ModelAgreement(BaseModel):
+    description: Optional[str] = None
+
+    name: Optional[str] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class ModelVersion(BaseModel):
+    major: Optional[int] = None
+
+    minor: Optional[int] = None
+
+    patch: Optional[int] = None
+
+
+class Model(BaseModel):
+    agreement: Optional[ModelAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    inference_name: Optional[str] = None
+
+    inference_version: Optional[str] = None
+
+    is_foundational: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+        None
+    )
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    usecases: Optional[
+        List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+    ] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[ModelVersion] = None
+
+
 class TemplateGuardrail(BaseModel):
     priority: Optional[int] = None
 
     uuid: Optional[str] = None
 
 
+class TemplateModelAgreement(BaseModel):
+    description: Optional[str] = None
+
+    name: Optional[str] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class TemplateModelVersion(BaseModel):
+    major: Optional[int] = None
+
+    minor: Optional[int] = None
+
+    patch: Optional[int] = None
+
+
+class TemplateModel(BaseModel):
+    agreement: Optional[TemplateModelAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    inference_name: Optional[str] = None
+
+    inference_version: Optional[str] = None
+
+    is_foundational: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+        None
+    )
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    usecases: Optional[
+        List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+    ] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[TemplateModelVersion] = None
+
+
 class Template(BaseModel):
     created_at: Optional[datetime] = None
@@ -162,7 +295,7 @@ class Template(BaseModel):
 
     max_tokens: Optional[int] = None
 
-    model: Optional[APIModel] = None
+    model: Optional[TemplateModel] = None
 
     name: Optional[str] = None
 
@@ -222,7 +355,7 @@ class APIAgent(BaseModel):
 
     max_tokens: Optional[int] = None
 
-    model: Optional[APIModel] = None
+    model: Optional[Model] = None
 
     name: Optional[str] = None
 
diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py
index 82120454..ac6f9c55 100644
--- a/src/gradientai/types/api_model.py
+++ b/src/gradientai/types/api_model.py
@@ -1,8 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Optional
+from typing import Optional
 from datetime import datetime
-from typing_extensions import Literal
 
 from .._models import BaseModel
 
@@ -32,42 +31,18 @@ class APIModel(BaseModel):
 
     created_at: Optional[datetime] = None
 
-    inference_name: Optional[str] = None
-
-    inference_version: Optional[str] = None
-
     is_foundational: Optional[bool] = None
 
-    metadata: Optional[object] = None
-
     name: Optional[str] = None
 
     parent_uuid: Optional[str] = None
 
-    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
-        None
-    )
-
     updated_at: Optional[datetime] = None
 
     upload_complete: Optional[bool] = None
 
     url: Optional[str] = None
 
-    usecases: Optional[
-        List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-    ] = None
-
     uuid: Optional[str] = None
 
     version: Optional[Version] = None
diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py
index 39328f80..0f57136d 100644
--- a/src/gradientai/types/api_openai_api_key_info.py
+++ b/src/gradientai/types/api_openai_api_key_info.py
@@ -2,11 +2,75 @@
 
 from typing import List, Optional
 from datetime import datetime
+from typing_extensions import Literal
 
 from .._models import BaseModel
-from .api_model import APIModel
 
-__all__ = ["APIOpenAIAPIKeyInfo"]
+__all__ = ["APIOpenAIAPIKeyInfo", "Model", "ModelAgreement", "ModelVersion"]
+
+
+class ModelAgreement(BaseModel):
+    description: Optional[str] = None
+
+    name: Optional[str] = None
+
+    url: Optional[str] = None
+
+    uuid: Optional[str] = None
+
+
+class ModelVersion(BaseModel):
+    major: Optional[int] = None
+
+    minor: Optional[int] = None
+
+    patch: Optional[int] = None
+
+
+class Model(BaseModel):
+    agreement: Optional[ModelAgreement] = None
+
+    created_at: Optional[datetime] = None
+
+    inference_name: Optional[str] = None
+
+    inference_version: Optional[str] = None
+
+    is_foundational: Optional[bool] = None
+
+    metadata: Optional[object] = None
+
+    name: Optional[str] = None
+
+    parent_uuid: Optional[str] = None
+
+    provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+        None
+    )
+
+    updated_at: Optional[datetime] = None
+
+    upload_complete: Optional[bool] = None
+
+    url: Optional[str] = None
+
+    usecases: Optional[
+        List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+    ] = None
+
+    uuid: Optional[str] = None
+
+    version: Optional[ModelVersion] = None
 
 
 class APIOpenAIAPIKeyInfo(BaseModel):
@@ -16,7 +80,7 @@ class APIOpenAIAPIKeyInfo(BaseModel):
 
     deleted_at: Optional[datetime] = None
 
-    models: Optional[List[APIModel]] = None
+    models: Optional[List[Model]] = None
 
     name: Optional[str] = None
 
diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py
deleted file mode 100644
index cba51b07..00000000
--- a/src/gradientai/types/model.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from datetime import datetime
-
-from .._models import BaseModel
-
-__all__ = ["Model", "Agreement", "Version"]
-
-
-class Agreement(BaseModel):
-    description: Optional[str] = None
-
-    name: Optional[str] = None
-
-    url: Optional[str] = None
-
-    uuid: Optional[str] = None
-
-
-class Version(BaseModel):
-    major: Optional[int] = None
-
-    minor: Optional[int] = None
-
-    patch: Optional[int] = None
-
-
-class Model(BaseModel):
-    agreement: Optional[Agreement] = None
-
-    created_at: Optional[datetime] = None
-
-    is_foundational: Optional[bool] = None
-
-    name: Optional[str] = None
-
-    parent_uuid: Optional[str] = None
-
-    updated_at: Optional[datetime] = None
-
-    upload_complete: Optional[bool] = None
-
-    url: Optional[str] = None
-
-    uuid: Optional[str] = None
-
-    version: Optional[Version] = None
diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py
index 93d9ae04..e6f5fad5 100644
--- a/src/gradientai/types/model_list_response.py
+++ b/src/gradientai/types/model_list_response.py
@@ -2,8 +2,8 @@
 
 from typing import List, Optional
 
-from .model import Model
 from .._models import BaseModel
+from .api_model import APIModel
 from .agents.api_meta import APIMeta
 from .agents.api_links import APILinks
 
@@ -15,4 +15,4 @@ class ModelListResponse(BaseModel):
 
     meta: Optional[APIMeta] = None
 
-    models: Optional[List[Model]] = None
+    models: Optional[List[APIModel]] = None

From 89fdeb85dc188166fca75b3309985fb1c3d5d2bf Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 20 Jun 2025 14:07:44 +0000
Subject: [PATCH 046/200] feat(api): update via SDK Studio

---
 .stats.yml                                    |   2 +-
 README.md                                     |  16 +-
 api.md                                        |  70 ++---
 src/gradientai/_client.py                     |  40 +--
 src/gradientai/resources/__init__.py          |  28 +-
 .../{agents => doagents}/__init__.py          |  28 +-
 .../{agents => doagents}/api_keys.py          |  12 +-
 .../{agents => doagents}/child_agents.py      |  10 +-
 .../agents.py => doagents/doagents.py}        | 218 +++++++--------
 .../{agents => doagents}/functions.py         |   8 +-
 .../{agents => doagents}/knowledge_bases.py   |   4 +-
 .../{agents => doagents}/versions.py          |   6 +-
 src/gradientai/types/__init__.py              |  20 +-
 .../types/agent_update_status_response.py     |  16 --
 ...ate_params.py => doagent_create_params.py} |   4 +-
 ...response.py => doagent_create_response.py} |   4 +-
 ...response.py => doagent_delete_response.py} |   4 +-
 ..._list_params.py => doagent_list_params.py} |   4 +-
 ...t_response.py => doagent_list_response.py} |   8 +-
 ...sponse.py => doagent_retrieve_response.py} |   4 +-
 ...ate_params.py => doagent_update_params.py} |   4 +-
 ...response.py => doagent_update_response.py} |   4 +-
 ...ams.py => doagent_update_status_params.py} |   4 +-
 .../types/doagent_update_status_response.py   |  16 ++
 .../types/{agents => doagents}/__init__.py    |   0
 .../api_key_create_params.py                  |   0
 .../api_key_create_response.py                |   0
 .../api_key_delete_response.py                |   0
 .../api_key_list_params.py                    |   0
 .../api_key_list_response.py                  |   0
 .../api_key_regenerate_response.py            |   0
 .../api_key_update_params.py                  |   0
 .../api_key_update_response.py                |   0
 .../api_link_knowledge_base_output.py         |   0
 .../types/{agents => doagents}/api_links.py   |   0
 .../types/{agents => doagents}/api_meta.py    |   0
 .../child_agent_add_params.py                 |   0
 .../child_agent_add_response.py               |   0
 .../child_agent_delete_response.py            |   0
 .../child_agent_update_params.py              |   0
 .../child_agent_update_response.py            |   0
 .../child_agent_view_response.py              |   0
 .../function_create_params.py                 |   0
 .../function_create_response.py               |   0
 .../function_delete_response.py               |   0
 .../function_update_params.py                 |   0
 .../function_update_response.py               |   0
 .../knowledge_base_detach_response.py         |   0
 .../version_list_params.py                    |   0
 .../version_list_response.py                  |   0
 .../version_update_params.py                  |   0
 .../version_update_response.py                |   0
 .../types/indexing_job_list_response.py       |   4 +-
 .../types/inference/api_key_list_response.py  |   4 +-
 .../types/knowledge_base_list_response.py     |   4 +-
 .../data_source_list_response.py              |   4 +-
 src/gradientai/types/model_list_response.py   |   4 +-
 .../anthropic/key_list_agents_response.py     |   4 +-
 .../providers/anthropic/key_list_response.py  |   4 +-
 .../providers/openai/key_list_response.py     |   4 +-
 .../openai/key_retrieve_agents_response.py    |   4 +-
 .../{agents => doagents}/__init__.py          |   0
 .../{agents => doagents}/test_api_keys.py     | 106 ++++----
 .../{agents => doagents}/test_child_agents.py |  86 +++---
 .../{agents => doagents}/test_functions.py    |  66 ++---
 .../test_knowledge_bases.py                   |  58 ++--
 .../{agents => doagents}/test_versions.py     |  42 +--
 .../{test_agents.py => test_doagents.py}      | 256 +++++++++---------
 tests/test_client.py                          |  20 +-
 69 files changed, 602 insertions(+), 602 deletions(-)
 rename src/gradientai/resources/{agents => doagents}/__init__.py (84%)
 rename src/gradientai/resources/{agents => doagents}/api_keys.py (98%)
 rename src/gradientai/resources/{agents => doagents}/child_agents.py (98%)
 rename src/gradientai/resources/{agents/agents.py => doagents/doagents.py} (87%)
 rename src/gradientai/resources/{agents => doagents}/functions.py (98%)
 rename src/gradientai/resources/{agents => doagents}/knowledge_bases.py (98%)
 rename src/gradientai/resources/{agents => doagents}/versions.py (98%)
 delete mode 100644 src/gradientai/types/agent_update_status_response.py
 rename src/gradientai/types/{agent_create_params.py => doagent_create_params.py} (90%)
 rename src/gradientai/types/{agent_update_response.py => doagent_create_response.py} (77%)
 rename src/gradientai/types/{agent_retrieve_response.py => doagent_delete_response.py} (77%)
 rename src/gradientai/types/{agent_list_params.py => doagent_list_params.py} (79%)
 rename src/gradientai/types/{agent_list_response.py => doagent_list_response.py} (98%)
 rename src/gradientai/types/{agent_delete_response.py => doagent_retrieve_response.py} (76%)
 rename src/gradientai/types/{agent_update_params.py => doagent_update_params.py} (95%)
 rename src/gradientai/types/{agent_create_response.py => doagent_update_response.py} (77%)
 rename src/gradientai/types/{agent_update_status_params.py => doagent_update_status_params.py} (79%)
 create mode 100644 src/gradientai/types/doagent_update_status_response.py
 rename src/gradientai/types/{agents => doagents}/__init__.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_create_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_create_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_delete_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_list_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_list_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_regenerate_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_update_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_key_update_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_link_knowledge_base_output.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_links.py (100%)
 rename src/gradientai/types/{agents => doagents}/api_meta.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_add_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_add_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_delete_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_update_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_update_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/child_agent_view_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/function_create_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/function_create_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/function_delete_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/function_update_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/function_update_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/knowledge_base_detach_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/version_list_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/version_list_response.py (100%)
 rename src/gradientai/types/{agents => doagents}/version_update_params.py (100%)
 rename src/gradientai/types/{agents => doagents}/version_update_response.py (100%)
 rename tests/api_resources/{agents => doagents}/__init__.py (100%)
 rename tests/api_resources/{agents => doagents}/test_api_keys.py (84%)
 rename tests/api_resources/{agents => doagents}/test_child_agents.py (84%)
 rename tests/api_resources/{agents => doagents}/test_functions.py (85%)
 rename tests/api_resources/{agents => doagents}/test_knowledge_bases.py (82%)
 rename tests/api_resources/{agents => doagents}/test_versions.py (84%)
 rename tests/api_resources/{test_agents.py => test_doagents.py} (66%)

diff --git a/.stats.yml b/.stats.yml
index 8f85d58c..0e1ae316 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 58
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
 openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46
+config_hash: c424a9395cc2b0dbf298813e54562194
diff --git a/README.md b/README.md
index 36edcfbd..d047f658 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ client = GradientAI(
     ),  # This is the default and can be omitted
 )
 
-versions = client.agents.versions.list(
+versions = client.doagents.versions.list(
     uuid="REPLACE_ME",
 )
 print(versions.agent_versions)
@@ -61,7 +61,7 @@ client = AsyncGradientAI(
 
 
 async def main() -> None:
-    versions = await client.agents.versions.list(
+    versions = await client.doagents.versions.list(
         uuid="REPLACE_ME",
     )
     print(versions.agent_versions)
@@ -113,7 +113,7 @@ from gradientai import GradientAI
 client = GradientAI()
 
 try:
-    client.agents.versions.list(
+    client.doagents.versions.list(
         uuid="REPLACE_ME",
     )
 except gradientai.APIConnectionError as e:
@@ -158,7 +158,7 @@ client = GradientAI(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).agents.versions.list(
+client.with_options(max_retries=5).doagents.versions.list(
     uuid="REPLACE_ME",
 )
 ```
@@ -183,7 +183,7 @@ client = GradientAI(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).agents.versions.list(
+client.with_options(timeout=5.0).doagents.versions.list(
     uuid="REPLACE_ME",
 )
 ```
@@ -226,12 +226,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from gradientai import GradientAI
 
 client = GradientAI()
-response = client.agents.versions.with_raw_response.list(
+response = client.doagents.versions.with_raw_response.list(
     uuid="REPLACE_ME",
 )
 print(response.headers.get('X-My-Header'))
 
-version = response.parse()  # get the object that `agents.versions.list()` would have returned
+version = response.parse()  # get the object that `doagents.versions.list()` would have returned
 print(version.agent_versions)
 ```
 
@@ -246,7 +246,7 @@ The above interface eagerly reads the full response body when you make the reque
 To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 ```python
-with client.agents.versions.with_streaming_response.list(
+with client.doagents.versions.with_streaming_response.list(
     uuid="REPLACE_ME",
 ) as response:
     print(response.headers.get("X-My-Header"))
diff --git a/api.md b/api.md
index 2376a11f..0bc41bbe 100644
--- a/api.md
+++ b/api.md
@@ -1,4 +1,4 @@
-# Agents
+# Doagents
 
 Types:
 
@@ -10,30 +10,30 @@ from gradientai.types import (
     APIDeploymentVisibility,
     APIOpenAIAPIKeyInfo,
     APIRetrievalMethod,
-    AgentCreateResponse,
-    AgentRetrieveResponse,
-    AgentUpdateResponse,
-    AgentListResponse,
-    AgentDeleteResponse,
-    AgentUpdateStatusResponse,
+    DoagentCreateResponse,
+    DoagentRetrieveResponse,
+    DoagentUpdateResponse,
+    DoagentListResponse,
+    DoagentDeleteResponse,
+    DoagentUpdateStatusResponse,
 )
 ```
 
 Methods:
 
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.doagents.create(\*\*params) -> DoagentCreateResponse
+- client.doagents.retrieve(uuid) -> DoagentRetrieveResponse
+- client.doagents.update(path_uuid, \*\*params) -> DoagentUpdateResponse
+- client.doagents.list(\*\*params) -> DoagentListResponse
+- client.doagents.delete(uuid) -> DoagentDeleteResponse
+- client.doagents.update_status(path_uuid, \*\*params) -> DoagentUpdateStatusResponse
 
 ## APIKeys
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from gradientai.types.doagents import (
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
     APIKeyListResponse,
@@ -44,18 +44,18 @@
 
 Methods:
 
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.doagents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.doagents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.doagents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.doagents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.doagents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
 
 ## Functions
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from gradientai.types.doagents import (
     FunctionCreateResponse,
     FunctionUpdateResponse,
     FunctionDeleteResponse,
@@ -64,43 +64,43 @@
 
 Methods:
 
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.doagents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.doagents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.doagents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
 
 ## Versions
 
 Types:
 
 ```python
-from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
+from gradientai.types.doagents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
 ```
 
 Methods:
 
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.doagents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.doagents.versions.list(uuid, \*\*params) -> VersionListResponse
 
 ## KnowledgeBases
 
 Types:
 
 ```python
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 ```
 
 Methods:
 
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.doagents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.doagents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.doagents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
 
 ## ChildAgents
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from gradientai.types.doagents import (
     ChildAgentUpdateResponse,
     ChildAgentDeleteResponse,
     ChildAgentAddResponse,
@@ -110,10 +110,10 @@
 
 Methods:
 
-- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
-- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
-- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
-- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
+- client.doagents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
+- client.doagents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
+- client.doagents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
+- client.doagents.child_agents.view(uuid) -> ChildAgentViewResponse
 
 # Providers
 
diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py
index 0a5eb9a1..992559a2 100644
--- a/src/gradientai/_client.py
+++ b/src/gradientai/_client.py
@@ -31,12 +31,12 @@
 )
 
 if TYPE_CHECKING:
-    from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases
+    from .resources import chat, models, regions, doagents, inference, providers, indexing_jobs, knowledge_bases
     from .resources.models import ModelsResource, AsyncModelsResource
     from .resources.regions import RegionsResource, AsyncRegionsResource
     from .resources.chat.chat import ChatResource, AsyncChatResource
-    from .resources.agents.agents import AgentsResource, AsyncAgentsResource
     from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource
+    from .resources.doagents.doagents import DoagentsResource, AsyncDoagentsResource
     from .resources.inference.inference import InferenceResource, AsyncInferenceResource
     from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
     from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
@@ -110,10 +110,10 @@ def __init__(
         )
 
     @cached_property
-    def agents(self) -> AgentsResource:
-        from .resources.agents import AgentsResource
+    def doagents(self) -> DoagentsResource:
+        from .resources.doagents import DoagentsResource
 
-        return AgentsResource(self)
+        return DoagentsResource(self)
 
     @cached_property
     def providers(self) -> ProvidersResource:
@@ -329,10 +329,10 @@ def __init__(
         )
 
     @cached_property
-    def agents(self) -> AsyncAgentsResource:
-        from .resources.agents import AsyncAgentsResource
+    def doagents(self) -> AsyncDoagentsResource:
+        from .resources.doagents import AsyncDoagentsResource
 
-        return AsyncAgentsResource(self)
+        return AsyncDoagentsResource(self)
 
     @cached_property
     def providers(self) -> AsyncProvidersResource:
@@ -498,10 +498,10 @@ def __init__(self, client: GradientAI) -> None:
         self._client = client
 
     @cached_property
-    def agents(self) -> agents.AgentsResourceWithRawResponse:
-        from .resources.agents import AgentsResourceWithRawResponse
+    def doagents(self) -> doagents.DoagentsResourceWithRawResponse:
+        from .resources.doagents import DoagentsResourceWithRawResponse
 
-        return AgentsResourceWithRawResponse(self._client.agents)
+        return DoagentsResourceWithRawResponse(self._client.doagents)
 
     @cached_property
     def providers(self) -> providers.ProvidersResourceWithRawResponse:
@@ -553,10 +553,10 @@ def __init__(self, client: AsyncGradientAI) -> None:
         self._client = client
 
     @cached_property
-    def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
-        from .resources.agents import AsyncAgentsResourceWithRawResponse
+    def doagents(self) -> doagents.AsyncDoagentsResourceWithRawResponse:
+        from .resources.doagents import AsyncDoagentsResourceWithRawResponse
 
-        return AsyncAgentsResourceWithRawResponse(self._client.agents)
+        return AsyncDoagentsResourceWithRawResponse(self._client.doagents)
 
     @cached_property
     def providers(self) -> providers.AsyncProvidersResourceWithRawResponse:
@@ -608,10 +608,10 @@ def __init__(self, client: GradientAI) -> None:
         self._client = client
 
     @cached_property
-    def agents(self) -> agents.AgentsResourceWithStreamingResponse:
-        from .resources.agents import AgentsResourceWithStreamingResponse
+    def doagents(self) -> doagents.DoagentsResourceWithStreamingResponse:
+        from .resources.doagents import DoagentsResourceWithStreamingResponse
 
-        return AgentsResourceWithStreamingResponse(self._client.agents)
+        return DoagentsResourceWithStreamingResponse(self._client.doagents)
 
     @cached_property
     def providers(self) -> providers.ProvidersResourceWithStreamingResponse:
@@ -663,10 +663,10 @@ def __init__(self, client: AsyncGradientAI) -> None:
         self._client = client
 
     @cached_property
-    def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
-        from .resources.agents import AsyncAgentsResourceWithStreamingResponse
+    def doagents(self) -> doagents.AsyncDoagentsResourceWithStreamingResponse:
+        from .resources.doagents import AsyncDoagentsResourceWithStreamingResponse
 
-        return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
+        return AsyncDoagentsResourceWithStreamingResponse(self._client.doagents)
 
     @cached_property
     def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse:
diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py
index 1763a13e..17791967 100644
--- a/src/gradientai/resources/__init__.py
+++ b/src/gradientai/resources/__init__.py
@@ -8,14 +8,6 @@
     ChatResourceWithStreamingResponse,
     AsyncChatResourceWithStreamingResponse,
 )
-from .agents import (
-    AgentsResource,
-    AsyncAgentsResource,
-    AgentsResourceWithRawResponse,
-    AsyncAgentsResourceWithRawResponse,
-    AgentsResourceWithStreamingResponse,
-    AsyncAgentsResourceWithStreamingResponse,
-)
 from .models import (
     ModelsResource,
     AsyncModelsResource,
@@ -32,6 +24,14 @@
     RegionsResourceWithStreamingResponse,
     AsyncRegionsResourceWithStreamingResponse,
 )
+from .doagents import (
+    DoagentsResource,
+    AsyncDoagentsResource,
+    DoagentsResourceWithRawResponse,
+    AsyncDoagentsResourceWithRawResponse,
+    DoagentsResourceWithStreamingResponse,
+    AsyncDoagentsResourceWithStreamingResponse,
+)
 from .inference import (
     InferenceResource,
     AsyncInferenceResource,
@@ -66,12 +66,12 @@
 )
 
 __all__ = [
-    "AgentsResource",
-    "AsyncAgentsResource",
-    "AgentsResourceWithRawResponse",
-    "AsyncAgentsResourceWithRawResponse",
-    "AgentsResourceWithStreamingResponse",
-    "AsyncAgentsResourceWithStreamingResponse",
+    "DoagentsResource",
+    "AsyncDoagentsResource",
+    "DoagentsResourceWithRawResponse",
+    "AsyncDoagentsResourceWithRawResponse",
+    "DoagentsResourceWithStreamingResponse",
+    "AsyncDoagentsResourceWithStreamingResponse",
     "ProvidersResource",
     "AsyncProvidersResource",
     "ProvidersResourceWithRawResponse",
diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/doagents/__init__.py
similarity index 84%
rename from src/gradientai/resources/agents/__init__.py
rename to src/gradientai/resources/doagents/__init__.py
index f41a0408..5ee3485f 100644
--- a/src/gradientai/resources/agents/__init__.py
+++ b/src/gradientai/resources/doagents/__init__.py
@@ -1,13 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from .agents import (
-    AgentsResource,
-    AsyncAgentsResource,
-    AgentsResourceWithRawResponse,
-    AsyncAgentsResourceWithRawResponse,
-    AgentsResourceWithStreamingResponse,
-    AsyncAgentsResourceWithStreamingResponse,
-)
 from .api_keys import (
     APIKeysResource,
     AsyncAPIKeysResource,
@@ -16,6 +8,14 @@
     APIKeysResourceWithStreamingResponse,
     AsyncAPIKeysResourceWithStreamingResponse,
 )
+from .doagents import (
+    DoagentsResource,
+    AsyncDoagentsResource,
+    DoagentsResourceWithRawResponse,
+    AsyncDoagentsResourceWithRawResponse,
+    DoagentsResourceWithStreamingResponse,
+    AsyncDoagentsResourceWithStreamingResponse,
+)
 from .versions import (
     VersionsResource,
     AsyncVersionsResource,
@@ -80,10 +80,10 @@
     "AsyncChildAgentsResourceWithRawResponse",
     "ChildAgentsResourceWithStreamingResponse",
     "AsyncChildAgentsResourceWithStreamingResponse",
-    "AgentsResource",
-    "AsyncAgentsResource",
-    "AgentsResourceWithRawResponse",
-    "AsyncAgentsResourceWithRawResponse",
-    "AgentsResourceWithStreamingResponse",
-    "AsyncAgentsResourceWithStreamingResponse",
+    "DoagentsResource",
+    "AsyncDoagentsResource",
+    "DoagentsResourceWithRawResponse",
+    "AsyncDoagentsResourceWithRawResponse",
+    "DoagentsResourceWithStreamingResponse",
+    "AsyncDoagentsResourceWithStreamingResponse",
 ]
diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/doagents/api_keys.py
similarity index 98%
rename from src/gradientai/resources/agents/api_keys.py
rename to src/gradientai/resources/doagents/api_keys.py
index 155e3adc..c55249be 100644
--- a/src/gradientai/resources/agents/api_keys.py
+++ b/src/gradientai/resources/doagents/api_keys.py
@@ -15,12 +15,12 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params
-from ...types.agents.api_key_list_response import APIKeyListResponse
-from ...types.agents.api_key_create_response import APIKeyCreateResponse
-from ...types.agents.api_key_delete_response import APIKeyDeleteResponse
-from ...types.agents.api_key_update_response import APIKeyUpdateResponse
-from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse
+from ...types.doagents import api_key_list_params, api_key_create_params, api_key_update_params
+from ...types.doagents.api_key_list_response import APIKeyListResponse
+from ...types.doagents.api_key_create_response import APIKeyCreateResponse
+from ...types.doagents.api_key_delete_response import APIKeyDeleteResponse
+from ...types.doagents.api_key_update_response import APIKeyUpdateResponse
+from ...types.doagents.api_key_regenerate_response import APIKeyRegenerateResponse
 
 __all__ = ["APIKeysResource", "AsyncAPIKeysResource"]
 
diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/doagents/child_agents.py
similarity index 98%
rename from src/gradientai/resources/agents/child_agents.py
rename to src/gradientai/resources/doagents/child_agents.py
index 9031d8ce..6e8abfb7 100644
--- a/src/gradientai/resources/agents/child_agents.py
+++ b/src/gradientai/resources/doagents/child_agents.py
@@ -15,11 +15,11 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.agents import child_agent_add_params, child_agent_update_params
-from ...types.agents.child_agent_add_response import ChildAgentAddResponse
-from ...types.agents.child_agent_view_response import ChildAgentViewResponse
-from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse
-from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse
+from ...types.doagents import child_agent_add_params, child_agent_update_params
+from ...types.doagents.child_agent_add_response import ChildAgentAddResponse
+from ...types.doagents.child_agent_view_response import ChildAgentViewResponse
+from ...types.doagents.child_agent_delete_response import ChildAgentDeleteResponse
+from ...types.doagents.child_agent_update_response import ChildAgentUpdateResponse
 
 __all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"]
 
diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/doagents/doagents.py
similarity index 87%
rename from src/gradientai/resources/agents/agents.py
rename to src/gradientai/resources/doagents/doagents.py
index 78439d33..89951704 100644
--- a/src/gradientai/resources/agents/agents.py
+++ b/src/gradientai/resources/doagents/doagents.py
@@ -9,10 +9,10 @@
 from ...types import (
     APIRetrievalMethod,
     APIDeploymentVisibility,
-    agent_list_params,
-    agent_create_params,
-    agent_update_params,
-    agent_update_status_params,
+    doagent_list_params,
+    doagent_create_params,
+    doagent_update_params,
+    doagent_update_status_params,
 )
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from ..._utils import maybe_transform, async_maybe_transform
@@ -65,19 +65,19 @@
     KnowledgeBasesResourceWithStreamingResponse,
     AsyncKnowledgeBasesResourceWithStreamingResponse,
 )
-from ...types.agent_list_response import AgentListResponse
 from ...types.api_retrieval_method import APIRetrievalMethod
-from ...types.agent_create_response import AgentCreateResponse
-from ...types.agent_delete_response import AgentDeleteResponse
-from ...types.agent_update_response import AgentUpdateResponse
-from ...types.agent_retrieve_response import AgentRetrieveResponse
+from ...types.doagent_list_response import DoagentListResponse
+from ...types.doagent_create_response import DoagentCreateResponse
+from ...types.doagent_delete_response import DoagentDeleteResponse
+from ...types.doagent_update_response import DoagentUpdateResponse
 from ...types.api_deployment_visibility import APIDeploymentVisibility
-from ...types.agent_update_status_response import AgentUpdateStatusResponse
+from ...types.doagent_retrieve_response import DoagentRetrieveResponse
+from ...types.doagent_update_status_response import DoagentUpdateStatusResponse
 
-__all__ = ["AgentsResource", "AsyncAgentsResource"]
+__all__ = ["DoagentsResource", "AsyncDoagentsResource"]
 
 
-class AgentsResource(SyncAPIResource):
+class DoagentsResource(SyncAPIResource):
     @cached_property
     def api_keys(self) -> APIKeysResource:
         return APIKeysResource(self._client)
@@ -99,23 +99,23 @@ def child_agents(self) -> ChildAgentsResource:
         return ChildAgentsResource(self._client)
 
     @cached_property
-    def with_raw_response(self) -> AgentsResourceWithRawResponse:
+    def with_raw_response(self) -> DoagentsResourceWithRawResponse:
         """
         This property can be used as a prefix for any HTTP method call to return the
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
         """
-        return AgentsResourceWithRawResponse(self)
+        return DoagentsResourceWithRawResponse(self)
 
     @cached_property
-    def with_streaming_response(self) -> AgentsResourceWithStreamingResponse:
+    def with_streaming_response(self) -> DoagentsResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
 
         For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
         """
-        return AgentsResourceWithStreamingResponse(self)
+        return DoagentsResourceWithStreamingResponse(self)
 
     def create(
         self,
@@ -136,7 +136,7 @@ def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentCreateResponse:
+    ) -> DoagentCreateResponse:
         """To create a new agent, send a POST request to `/v2/gen-ai/agents`.
 
         The response
@@ -175,12 +175,12 @@ def create(
                     "region": region,
                     "tags": tags,
                 },
-                agent_create_params.AgentCreateParams,
+                doagent_create_params.DoagentCreateParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentCreateResponse,
+            cast_to=DoagentCreateResponse,
         )
 
     def retrieve(
        self,
@@ -193,7 +193,7 @@ def retrieve(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentRetrieveResponse:
+    ) -> DoagentRetrieveResponse:
         """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`.
 
         The
@@ -217,7 +217,7 @@ def retrieve(
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentRetrieveResponse,
+            cast_to=DoagentRetrieveResponse,
         )
 
     def update(
        self,
@@ -245,7 +245,7 @@ def update(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentUpdateResponse:
+    ) -> DoagentUpdateResponse:
         """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`.
 
         The
@@ -303,12 +303,12 @@ def update(
                     "top_p": top_p,
                     "body_uuid": body_uuid,
                 },
-                agent_update_params.AgentUpdateParams,
+                doagent_update_params.DoagentUpdateParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentUpdateResponse,
+            cast_to=DoagentUpdateResponse,
         )
 
     def list(
        self,
@@ -323,7 +323,7 @@ def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentListResponse:
+    ) -> DoagentListResponse:
         """
         To list all agents, send a GET request to `/v2/gen-ai/agents`.
 
@@ -357,10 +357,10 @@ def list(
                         "page": page,
                         "per_page": per_page,
                     },
-                    agent_list_params.AgentListParams,
+                    doagent_list_params.DoagentListParams,
                 ),
             ),
-            cast_to=AgentListResponse,
+            cast_to=DoagentListResponse,
         )
 
     def delete(
        self,
@@ -373,7 +373,7 @@ def delete(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentDeleteResponse:
+    ) -> DoagentDeleteResponse:
         """
         To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`.
@@ -395,7 +395,7 @@ def delete(
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentDeleteResponse,
+            cast_to=DoagentDeleteResponse,
         )
 
     def update_status(
        self,
@@ -410,7 +410,7 @@ def update_status(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentUpdateStatusResponse:
+    ) -> DoagentUpdateStatusResponse:
         """Check whether an agent is public or private.
 
         To update the agent status, send a
@@ -436,16 +436,16 @@ def update_status(
                     "body_uuid": body_uuid,
                     "visibility": visibility,
                 },
-                agent_update_status_params.AgentUpdateStatusParams,
+                doagent_update_status_params.DoagentUpdateStatusParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentUpdateStatusResponse,
+            cast_to=DoagentUpdateStatusResponse,
         )
 
 
-class AsyncAgentsResource(AsyncAPIResource):
+class AsyncDoagentsResource(AsyncAPIResource):
     @cached_property
     def api_keys(self) -> AsyncAPIKeysResource:
         return AsyncAPIKeysResource(self._client)
@@ -467,23 +467,23 @@ def child_agents(self) -> AsyncChildAgentsResource:
         return AsyncChildAgentsResource(self._client)
 
     @cached_property
-    def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
+    def with_raw_response(self) -> AsyncDoagentsResourceWithRawResponse:
         """
         This property can be used as a prefix for any HTTP method call to return the
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers
         """
-        return AsyncAgentsResourceWithRawResponse(self)
+        return AsyncDoagentsResourceWithRawResponse(self)
 
     @cached_property
-    def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse:
+    def with_streaming_response(self) -> AsyncDoagentsResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
 
         For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response
         """
-        return AsyncAgentsResourceWithStreamingResponse(self)
+        return AsyncDoagentsResourceWithStreamingResponse(self)
 
     async def create(
         self,
@@ -504,7 +504,7 @@ async def create(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentCreateResponse:
+    ) -> DoagentCreateResponse:
         """To create a new agent, send a POST request to `/v2/gen-ai/agents`.
 
         The response
@@ -543,12 +543,12 @@ async def create(
                     "region": region,
                     "tags": tags,
                 },
-                agent_create_params.AgentCreateParams,
+                doagent_create_params.DoagentCreateParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentCreateResponse,
+            cast_to=DoagentCreateResponse,
         )
 
     async def retrieve(
        self,
@@ -561,7 +561,7 @@ async def retrieve(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentRetrieveResponse:
+    ) -> DoagentRetrieveResponse:
         """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`.
 
         The
@@ -585,7 +585,7 @@ async def retrieve(
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentRetrieveResponse,
+            cast_to=DoagentRetrieveResponse,
         )
 
     async def update(
        self,
@@ -613,7 +613,7 @@ async def update(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentUpdateResponse:
+    ) -> DoagentUpdateResponse:
         """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`.
 
         The
@@ -671,12 +671,12 @@ async def update(
                     "top_p": top_p,
                     "body_uuid": body_uuid,
                 },
-                agent_update_params.AgentUpdateParams,
+                doagent_update_params.DoagentUpdateParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentUpdateResponse,
+            cast_to=DoagentUpdateResponse,
         )
 
     async def list(
        self,
@@ -691,7 +691,7 @@ async def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentListResponse:
+    ) -> DoagentListResponse:
         """
         To list all agents, send a GET request to `/v2/gen-ai/agents`.
 
@@ -725,10 +725,10 @@ async def list(
                         "page": page,
                         "per_page": per_page,
                     },
-                    agent_list_params.AgentListParams,
+                    doagent_list_params.DoagentListParams,
                 ),
             ),
-            cast_to=AgentListResponse,
+            cast_to=DoagentListResponse,
         )
 
     async def delete(
        self,
@@ -741,7 +741,7 @@ async def delete(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentDeleteResponse:
+    ) -> DoagentDeleteResponse:
         """
         To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`.
 
@@ -763,7 +763,7 @@ async def delete(
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentDeleteResponse,
+            cast_to=DoagentDeleteResponse,
         )
 
     async def update_status(
         self,
@@ -778,7 +778,7 @@ async def update_status(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AgentUpdateStatusResponse:
+    ) -> DoagentUpdateStatusResponse:
         """Check whether an agent is public or private.
 
         To update the agent status, send a
@@ -804,186 +804,186 @@ async def update_status(
                     "body_uuid": body_uuid,
                     "visibility": visibility,
                 },
-                agent_update_status_params.AgentUpdateStatusParams,
+                doagent_update_status_params.DoagentUpdateStatusParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=AgentUpdateStatusResponse,
+            cast_to=DoagentUpdateStatusResponse,
         )
 
 
-class AgentsResourceWithRawResponse:
-    def __init__(self, agents: AgentsResource) -> None:
-        self._agents = agents
+class DoagentsResourceWithRawResponse:
+    def __init__(self, doagents: DoagentsResource) -> None:
+        self._doagents = doagents
 
         self.create = to_raw_response_wrapper(
-            agents.create,
+            doagents.create,
         )
         self.retrieve = to_raw_response_wrapper(
-            agents.retrieve,
+            doagents.retrieve,
        )
         self.update = to_raw_response_wrapper(
-            agents.update,
+            doagents.update,
         )
         self.list = to_raw_response_wrapper(
-            agents.list,
+            doagents.list,
         )
         self.delete = to_raw_response_wrapper(
-            agents.delete,
+            doagents.delete,
         )
         self.update_status = to_raw_response_wrapper(
-            agents.update_status,
+            doagents.update_status,
         )
 
     @cached_property
     def api_keys(self) -> APIKeysResourceWithRawResponse:
-        return APIKeysResourceWithRawResponse(self._agents.api_keys)
+        return APIKeysResourceWithRawResponse(self._doagents.api_keys)
 
     @cached_property
     def functions(self) -> FunctionsResourceWithRawResponse:
-        return FunctionsResourceWithRawResponse(self._agents.functions)
+        return FunctionsResourceWithRawResponse(self._doagents.functions)
 
     @cached_property
     def versions(self) -> VersionsResourceWithRawResponse:
-        return VersionsResourceWithRawResponse(self._agents.versions)
+        return VersionsResourceWithRawResponse(self._doagents.versions)
 
     @cached_property
     def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse:
-        return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
+        return KnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases)
 
     @cached_property
     def child_agents(self) -> ChildAgentsResourceWithRawResponse:
-        return ChildAgentsResourceWithRawResponse(self._agents.child_agents)
+        return ChildAgentsResourceWithRawResponse(self._doagents.child_agents)
 
 
-class AsyncAgentsResourceWithRawResponse:
-    def __init__(self, agents: AsyncAgentsResource) -> None:
-        self._agents = agents
+class AsyncDoagentsResourceWithRawResponse:
+    def __init__(self, doagents: AsyncDoagentsResource) -> None:
+        self._doagents = doagents
 
         self.create = async_to_raw_response_wrapper(
-            agents.create,
+            doagents.create,
         )
         self.retrieve = async_to_raw_response_wrapper(
-            agents.retrieve,
+            doagents.retrieve,
         )
         self.update = async_to_raw_response_wrapper(
-            agents.update,
+            doagents.update,
         )
         self.list = async_to_raw_response_wrapper(
-            agents.list,
+            doagents.list,
         )
         self.delete = async_to_raw_response_wrapper(
-            agents.delete,
+            doagents.delete,
         )
         self.update_status = async_to_raw_response_wrapper(
-            agents.update_status,
+            doagents.update_status,
         )
 
     @cached_property
     def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
-        return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys)
+        return AsyncAPIKeysResourceWithRawResponse(self._doagents.api_keys)
 
     @cached_property
     def functions(self) -> AsyncFunctionsResourceWithRawResponse:
-        return AsyncFunctionsResourceWithRawResponse(self._agents.functions)
+        return AsyncFunctionsResourceWithRawResponse(self._doagents.functions)
 
     @cached_property
     def versions(self) -> AsyncVersionsResourceWithRawResponse:
-        return AsyncVersionsResourceWithRawResponse(self._agents.versions)
+        return AsyncVersionsResourceWithRawResponse(self._doagents.versions)
 
     @cached_property
     def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
-        return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
+        return AsyncKnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases)
 
     @cached_property
     def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse:
-        return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents)
+        return AsyncChildAgentsResourceWithRawResponse(self._doagents.child_agents)
 
 
-class AgentsResourceWithStreamingResponse:
-    def __init__(self, agents: AgentsResource) -> None:
-        self._agents = agents
+class DoagentsResourceWithStreamingResponse:
+    def __init__(self, doagents: DoagentsResource) -> None:
+        self._doagents = doagents
 
         self.create = to_streamed_response_wrapper(
-            agents.create,
+            doagents.create,
         )
         self.retrieve = to_streamed_response_wrapper(
-            agents.retrieve,
+            doagents.retrieve,
         )
         self.update = to_streamed_response_wrapper(
-            agents.update,
+            doagents.update,
         )
         self.list = to_streamed_response_wrapper(
-            agents.list,
+            doagents.list,
         )
         self.delete = to_streamed_response_wrapper(
-            agents.delete,
+            doagents.delete,
         )
         self.update_status = to_streamed_response_wrapper(
-            agents.update_status,
+            doagents.update_status,
         )
 
     @cached_property
     def api_keys(self) -> APIKeysResourceWithStreamingResponse:
-        return APIKeysResourceWithStreamingResponse(self._agents.api_keys)
+        return APIKeysResourceWithStreamingResponse(self._doagents.api_keys)
 
     @cached_property
     def functions(self) -> FunctionsResourceWithStreamingResponse:
-        return FunctionsResourceWithStreamingResponse(self._agents.functions)
+        return FunctionsResourceWithStreamingResponse(self._doagents.functions)
 
     @cached_property
     def versions(self) -> VersionsResourceWithStreamingResponse:
-        return VersionsResourceWithStreamingResponse(self._agents.versions)
+        return VersionsResourceWithStreamingResponse(self._doagents.versions)
 
     @cached_property
     def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse:
-        return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
+        return KnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases)
 
     @cached_property
     def child_agents(self) -> ChildAgentsResourceWithStreamingResponse:
-        return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents)
+        return ChildAgentsResourceWithStreamingResponse(self._doagents.child_agents)
 
 
-class AsyncAgentsResourceWithStreamingResponse:
-    def __init__(self, agents: AsyncAgentsResource) -> None:
-        self._agents = agents
+class AsyncDoagentsResourceWithStreamingResponse:
    def __init__(self, doagents: AsyncDoagentsResource) -> None:
+        self._doagents = doagents
 
         self.create = async_to_streamed_response_wrapper(
-            agents.create,
+            doagents.create,
         )
         self.retrieve = async_to_streamed_response_wrapper(
-            agents.retrieve,
+            doagents.retrieve,
         )
         self.update = async_to_streamed_response_wrapper(
-            agents.update,
+            doagents.update,
         )
         self.list = async_to_streamed_response_wrapper(
-            agents.list,
+            doagents.list,
         )
         self.delete = async_to_streamed_response_wrapper(
-            agents.delete,
+            doagents.delete,
         )
         self.update_status = async_to_streamed_response_wrapper(
-            agents.update_status,
+            doagents.update_status,
         )
 
     @cached_property
     def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
-        return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys)
+        return AsyncAPIKeysResourceWithStreamingResponse(self._doagents.api_keys)
 
     @cached_property
     def functions(self) -> AsyncFunctionsResourceWithStreamingResponse:
-        return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions)
+        return AsyncFunctionsResourceWithStreamingResponse(self._doagents.functions)
 
     @cached_property
     def versions(self) -> AsyncVersionsResourceWithStreamingResponse:
-        return AsyncVersionsResourceWithStreamingResponse(self._agents.versions)
+        return AsyncVersionsResourceWithStreamingResponse(self._doagents.versions)
 
     @cached_property
     def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
-        return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
+        return AsyncKnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases)
 
     @cached_property
     def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse:
-        return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents)
+        return AsyncChildAgentsResourceWithStreamingResponse(self._doagents.child_agents)
diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/doagents/functions.py
similarity index 98%
rename from src/gradientai/resources/agents/functions.py
rename to src/gradientai/resources/doagents/functions.py
index 67a811cc..65ab2801 100644
--- a/src/gradientai/resources/agents/functions.py
+++ b/src/gradientai/resources/doagents/functions.py
@@ -15,10 +15,10 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.agents import function_create_params, function_update_params
-from ...types.agents.function_create_response import FunctionCreateResponse
-from ...types.agents.function_delete_response import FunctionDeleteResponse
-from ...types.agents.function_update_response import FunctionUpdateResponse
+from ...types.doagents import function_create_params, function_update_params
+from ...types.doagents.function_create_response import FunctionCreateResponse
+from ...types.doagents.function_delete_response import FunctionDeleteResponse
+from ...types.doagents.function_update_response import FunctionUpdateResponse
 
 __all__ = ["FunctionsResource", "AsyncFunctionsResource"]
 
diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/doagents/knowledge_bases.py
similarity index 98%
rename from src/gradientai/resources/agents/knowledge_bases.py
rename to src/gradientai/resources/doagents/knowledge_bases.py
index 3b9b0cd2..e806d7a2 100644
--- a/src/gradientai/resources/agents/knowledge_bases.py
+++ b/src/gradientai/resources/doagents/knowledge_bases.py
@@ -14,8 +14,8 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput
-from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse
+from ...types.doagents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput
+from ...types.doagents.knowledge_base_detach_response import KnowledgeBaseDetachResponse
 
 __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"]
 
diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/doagents/versions.py
similarity index 98%
rename from src/gradientai/resources/agents/versions.py
rename to src/gradientai/resources/doagents/versions.py
index 86dbf99f..6301bc0a 100644
--- a/src/gradientai/resources/agents/versions.py
+++ b/src/gradientai/resources/doagents/versions.py
@@ -15,9 +15,9 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.agents import version_list_params, version_update_params
-from ...types.agents.version_list_response import VersionListResponse
-from ...types.agents.version_update_response import VersionUpdateResponse
+from ...types.doagents import version_list_params, version_update_params
+from ...types.doagents.version_list_response import VersionListResponse
+from ...types.doagents.version_update_response import VersionUpdateResponse
 
 __all__ = ["VersionsResource", "AsyncVersionsResource"]
 
diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 5ee961c6..09d071f0 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -5,34 +5,34 @@
 from .api_agent import APIAgent as APIAgent
 from .api_model import APIModel as APIModel
 from .api_indexing_job import APIIndexingJob as APIIndexingJob
-from .agent_list_params import AgentListParams as AgentListParams
 from .model_list_params import ModelListParams as ModelListParams
 from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
 from .region_list_params import RegionListParams as RegionListParams
-from .agent_create_params import AgentCreateParams as AgentCreateParams
-from .agent_list_response import AgentListResponse as AgentListResponse
-from .agent_update_params import AgentUpdateParams as AgentUpdateParams
+from .doagent_list_params import DoagentListParams as DoagentListParams
 from .model_list_response import ModelListResponse as ModelListResponse
 from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
 from .region_list_response import RegionListResponse as RegionListResponse
-from .agent_create_response import AgentCreateResponse as AgentCreateResponse
-from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
-from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
+from .doagent_create_params import DoagentCreateParams as DoagentCreateParams
+from .doagent_list_response import DoagentListResponse as DoagentListResponse
+from .doagent_update_params import DoagentUpdateParams as DoagentUpdateParams
 from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
-from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
+from .doagent_create_response import DoagentCreateResponse as DoagentCreateResponse
+from .doagent_delete_response import DoagentDeleteResponse as DoagentDeleteResponse
+from .doagent_update_response import DoagentUpdateResponse as DoagentUpdateResponse
 from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
 from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
-from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams
+from .doagent_retrieve_response import DoagentRetrieveResponse as DoagentRetrieveResponse
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
 from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
 from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
 from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
-from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse
+from .doagent_update_status_params import DoagentUpdateStatusParams as DoagentUpdateStatusParams
 from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
 from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
 from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
 from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
+from .doagent_update_status_response import DoagentUpdateStatusResponse as DoagentUpdateStatusResponse
 from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
 from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
 from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse
diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py
deleted file mode 100644
index b200f99d..00000000
--- a/src/gradientai/types/agent_update_status_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["AgentUpdateStatusResponse"]
-
-
-class AgentUpdateStatusResponse(BaseModel):
-    agent: Optional["APIAgent"] = None
-
-
-from .api_agent import APIAgent
diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/doagent_create_params.py
similarity index 90%
rename from src/gradientai/types/agent_create_params.py
rename to src/gradientai/types/doagent_create_params.py
index 58b99df7..b5b6e72d 100644
--- a/src/gradientai/types/agent_create_params.py
+++ b/src/gradientai/types/doagent_create_params.py
@@ -7,10 +7,10 @@
 
 from .._utils import PropertyInfo
 
-__all__ = ["AgentCreateParams"]
+__all__ = ["DoagentCreateParams"]
 
 
-class AgentCreateParams(TypedDict, total=False):
+class DoagentCreateParams(TypedDict, total=False):
     anthropic_key_uuid: str
 
     description: str
diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/doagent_create_response.py
similarity index 77%
rename from src/gradientai/types/agent_update_response.py
rename to src/gradientai/types/doagent_create_response.py
index 2948aa1c..2d171436 100644
--- a/src/gradientai/types/agent_update_response.py
+++ b/src/gradientai/types/doagent_create_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["AgentUpdateResponse"]
+__all__ = ["DoagentCreateResponse"]
 
 
-class AgentUpdateResponse(BaseModel):
+class DoagentCreateResponse(BaseModel):
     agent: Optional["APIAgent"] = None
 
 
diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/doagent_delete_response.py
similarity index 77%
rename from src/gradientai/types/agent_retrieve_response.py
rename to src/gradientai/types/doagent_delete_response.py
index 2eed88af..5d90ba17 100644
--- a/src/gradientai/types/agent_retrieve_response.py
+++ b/src/gradientai/types/doagent_delete_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["AgentRetrieveResponse"]
+__all__ = ["DoagentDeleteResponse"]
 
 
-class AgentRetrieveResponse(BaseModel):
+class DoagentDeleteResponse(BaseModel):
     agent: Optional["APIAgent"] = None
 
 
diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/doagent_list_params.py
similarity index 79%
rename from
src/gradientai/types/agent_list_params.py rename to src/gradientai/types/doagent_list_params.py index e13a10c9..a9b3fb2b 100644 --- a/src/gradientai/types/agent_list_params.py +++ b/src/gradientai/types/doagent_list_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["AgentListParams"] +__all__ = ["DoagentListParams"] -class AgentListParams(TypedDict, total=False): +class DoagentListParams(TypedDict, total=False): only_deployed: bool """only list agents that are deployed.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/doagent_list_response.py similarity index 98% rename from src/gradientai/types/agent_list_response.py rename to src/gradientai/types/doagent_list_response.py index 6af9cd51..65c2b076 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/doagent_list_response.py @@ -5,14 +5,14 @@ from typing_extensions import Literal from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .doagents.api_meta import APIMeta from .api_knowledge_base import APIKnowledgeBase +from .doagents.api_links import APILinks from .api_retrieval_method import APIRetrievalMethod from .api_deployment_visibility import APIDeploymentVisibility __all__ = [ - "AgentListResponse", + "DoagentListResponse", "Agent", "AgentChatbot", "AgentChatbotIdentifier", @@ -323,7 +323,7 @@ class Agent(BaseModel): uuid: Optional[str] = None -class AgentListResponse(BaseModel): +class DoagentListResponse(BaseModel): agents: Optional[List[Agent]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/doagent_retrieve_response.py similarity index 76% rename from src/gradientai/types/agent_delete_response.py rename to src/gradientai/types/doagent_retrieve_response.py index eb1d440d..9fb0a722 100644 --- a/src/gradientai/types/agent_delete_response.py +++ b/src/gradientai/types/doagent_retrieve_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["AgentDeleteResponse"] +__all__ = ["DoagentRetrieveResponse"] -class AgentDeleteResponse(BaseModel): +class DoagentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/doagent_update_params.py similarity index 95% rename from src/gradientai/types/agent_update_params.py rename to src/gradientai/types/doagent_update_params.py index 85f9a9c2..a8598f5e 100644 --- a/src/gradientai/types/agent_update_params.py +++ b/src/gradientai/types/doagent_update_params.py @@ -8,10 +8,10 @@ from .._utils import PropertyInfo from .api_retrieval_method import APIRetrievalMethod -__all__ = ["AgentUpdateParams"] +__all__ = ["DoagentUpdateParams"] -class AgentUpdateParams(TypedDict, total=False): +class DoagentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str description: str diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/doagent_update_response.py similarity index 77% rename from src/gradientai/types/agent_create_response.py rename to src/gradientai/types/doagent_update_response.py index 48545fe9..4d48bee7 100644 --- a/src/gradientai/types/agent_create_response.py +++ b/src/gradientai/types/doagent_update_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["AgentCreateResponse"] +__all__ = ["DoagentUpdateResponse"] -class AgentCreateResponse(BaseModel): +class DoagentUpdateResponse(BaseModel): agent: Optional["APIAgent"] 
= None diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/doagent_update_status_params.py similarity index 79% rename from src/gradientai/types/agent_update_status_params.py rename to src/gradientai/types/doagent_update_status_params.py index a0cdc0b9..3bd0c539 100644 --- a/src/gradientai/types/agent_update_status_params.py +++ b/src/gradientai/types/doagent_update_status_params.py @@ -7,10 +7,10 @@ from .._utils import PropertyInfo from .api_deployment_visibility import APIDeploymentVisibility -__all__ = ["AgentUpdateStatusParams"] +__all__ = ["DoagentUpdateStatusParams"] -class AgentUpdateStatusParams(TypedDict, total=False): +class DoagentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] visibility: APIDeploymentVisibility diff --git a/src/gradientai/types/doagent_update_status_response.py b/src/gradientai/types/doagent_update_status_response.py new file mode 100644 index 00000000..b31c1e99 --- /dev/null +++ b/src/gradientai/types/doagent_update_status_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["DoagentUpdateStatusResponse"] + + +class DoagentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/doagents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to src/gradientai/types/doagents/__init__.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/doagents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/gradientai/types/doagents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/doagents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to src/gradientai/types/doagents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/doagents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/gradientai/types/doagents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/doagents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/gradientai/types/doagents/api_key_list_params.py diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/doagents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/gradientai/types/doagents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/doagents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/gradientai/types/doagents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/doagents/api_key_update_params.py similarity index 100% rename from 
src/gradientai/types/agents/api_key_update_params.py rename to src/gradientai/types/doagents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/doagents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/gradientai/types/doagents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/doagents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to src/gradientai/types/doagents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_links.py b/src/gradientai/types/doagents/api_links.py similarity index 100% rename from src/gradientai/types/agents/api_links.py rename to src/gradientai/types/doagents/api_links.py diff --git a/src/gradientai/types/agents/api_meta.py b/src/gradientai/types/doagents/api_meta.py similarity index 100% rename from src/gradientai/types/agents/api_meta.py rename to src/gradientai/types/doagents/api_meta.py diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/gradientai/types/doagents/child_agent_add_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_add_params.py rename to src/gradientai/types/doagents/child_agent_add_params.py diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/gradientai/types/doagents/child_agent_add_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_add_response.py rename to src/gradientai/types/doagents/child_agent_add_response.py diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/gradientai/types/doagents/child_agent_delete_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_delete_response.py rename to src/gradientai/types/doagents/child_agent_delete_response.py diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/gradientai/types/doagents/child_agent_update_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_params.py rename to src/gradientai/types/doagents/child_agent_update_params.py diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/gradientai/types/doagents/child_agent_update_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_response.py rename to src/gradientai/types/doagents/child_agent_update_response.py diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/gradientai/types/doagents/child_agent_view_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_view_response.py rename to src/gradientai/types/doagents/child_agent_view_response.py diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/doagents/function_create_params.py similarity index 100% rename from src/gradientai/types/agents/function_create_params.py rename to src/gradientai/types/doagents/function_create_params.py diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/doagents/function_create_response.py similarity index 100% rename from src/gradientai/types/agents/function_create_response.py rename to src/gradientai/types/doagents/function_create_response.py diff --git a/src/gradientai/types/agents/function_delete_response.py 
b/src/gradientai/types/doagents/function_delete_response.py similarity index 100% rename from src/gradientai/types/agents/function_delete_response.py rename to src/gradientai/types/doagents/function_delete_response.py diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/doagents/function_update_params.py similarity index 100% rename from src/gradientai/types/agents/function_update_params.py rename to src/gradientai/types/doagents/function_update_params.py diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/doagents/function_update_response.py similarity index 100% rename from src/gradientai/types/agents/function_update_response.py rename to src/gradientai/types/doagents/function_update_response.py diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/doagents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/agents/knowledge_base_detach_response.py rename to src/gradientai/types/doagents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/agents/version_list_params.py b/src/gradientai/types/doagents/version_list_params.py similarity index 100% rename from src/gradientai/types/agents/version_list_params.py rename to src/gradientai/types/doagents/version_list_params.py diff --git a/src/gradientai/types/agents/version_list_response.py b/src/gradientai/types/doagents/version_list_response.py similarity index 100% rename from src/gradientai/types/agents/version_list_response.py rename to src/gradientai/types/doagents/version_list_response.py diff --git a/src/gradientai/types/agents/version_update_params.py b/src/gradientai/types/doagents/version_update_params.py similarity index 100% rename from src/gradientai/types/agents/version_update_params.py rename to src/gradientai/types/doagents/version_update_params.py diff --git a/src/gradientai/types/agents/version_update_response.py b/src/gradientai/types/doagents/version_update_response.py similarity index 100% rename from src/gradientai/types/agents/version_update_response.py rename to src/gradientai/types/doagents/version_update_response.py diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py index 1379cc55..dc94b966 100644 --- a/src/gradientai/types/indexing_job_list_response.py +++ b/src/gradientai/types/indexing_job_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks from .api_indexing_job import APIIndexingJob +from .doagents.api_meta import APIMeta +from .doagents.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 535e2f96..9cbc4bd5 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks +from ..doagents.api_meta import APIMeta +from ..doagents.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index 09ca1ad3..4fa7536d 100644 
--- a/src/gradientai/types/knowledge_base_list_response.py
+++ b/src/gradientai/types/knowledge_base_list_response.py
@@ -3,9 +3,9 @@
 from typing import List, Optional

 from .._models import BaseModel
-from .agents.api_meta import APIMeta
-from .agents.api_links import APILinks
+from .doagents.api_meta import APIMeta
 from .api_knowledge_base import APIKnowledgeBase
+from .doagents.api_links import APILinks

 __all__ = ["KnowledgeBaseListResponse"]
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py
index 78246ce1..d0c16c12 100644
--- a/src/gradientai/types/knowledge_bases/data_source_list_response.py
+++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional

 from ..._models import BaseModel
-from ..agents.api_meta import APIMeta
-from ..agents.api_links import APILinks
+from ..doagents.api_meta import APIMeta
+from ..doagents.api_links import APILinks
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource

 __all__ = ["DataSourceListResponse"]
diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py
index e6f5fad5..1eb8f907 100644
--- a/src/gradientai/types/model_list_response.py
+++ b/src/gradientai/types/model_list_response.py
@@ -4,8 +4,8 @@

 from .._models import BaseModel
 from .api_model import APIModel
-from .agents.api_meta import APIMeta
-from .agents.api_links import APILinks
+from .doagents.api_meta import APIMeta
+from .doagents.api_links import APILinks

 __all__ = ["ModelListResponse"]
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
index ba6ca946..174b5ea0 100644
--- a/src/gradientai/types/providers/anthropic/key_list_agents_response.py
+++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
@@ -5,8 +5,8 @@
 from typing import List, Optional

 from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...doagents.api_meta import APIMeta
+from ...doagents.api_links import APILinks

 __all__ = ["KeyListAgentsResponse"]
diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/gradientai/types/providers/anthropic/key_list_response.py
index d0b84e96..7699e23b 100644
--- a/src/gradientai/types/providers/anthropic/key_list_response.py
+++ b/src/gradientai/types/providers/anthropic/key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional

 from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...doagents.api_meta import APIMeta
+from ...doagents.api_links import APILinks
 from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo

 __all__ = ["KeyListResponse"]
diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py
index c263cba3..68a74cd1 100644
--- a/src/gradientai/types/providers/openai/key_list_response.py
+++ b/src/gradientai/types/providers/openai/key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional

 from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...doagents.api_meta import APIMeta
+from ...doagents.api_links import APILinks
 from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo

 __all__ = ["KeyListResponse"]
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
index f42edea6..9393fe08 100644
--- a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
+++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
@@ -5,8 +5,8 @@
 from typing import List, Optional

 from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...doagents.api_meta import APIMeta
+from ...doagents.api_links import APILinks

 __all__ = ["KeyRetrieveAgentsResponse"]
diff --git a/tests/api_resources/agents/__init__.py b/tests/api_resources/doagents/__init__.py
similarity index 100%
rename from tests/api_resources/agents/__init__.py
rename to tests/api_resources/doagents/__init__.py
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/doagents/test_api_keys.py
similarity index 84%
rename from tests/api_resources/agents/test_api_keys.py
rename to tests/api_resources/doagents/test_api_keys.py
index e8489258..dd654e83 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/doagents/test_api_keys.py
@@ -9,7 +9,7 @@
 from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from gradientai.types.doagents import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
@@ -26,7 +26,7 @@ class TestAPIKeys:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.create(
+        api_key = client.doagents.api_keys.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])
@@ -34,7 +34,7 @@ def test_method_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.create(
+        api_key = client.doagents.api_keys.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
             name="name",
@@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: GradientAI) -> None:
-        response = client.agents.api_keys.with_raw_response.create(
+        response = client.doagents.api_keys.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
 
@@ -56,7 +56,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: GradientAI) -> None:
-        with client.agents.api_keys.with_streaming_response.create(
+        with client.doagents.api_keys.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_create(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.create(
+            client.doagents.api_keys.with_raw_response.create(
                 path_agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_update(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.update(
+        api_key = client.doagents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -87,7 +87,7 @@ def test_method_update(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_update_with_all_params(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.update(
+        api_key = client.doagents.api_keys.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
@@ -99,7 +99,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update(self, client: GradientAI) -> None:
-        response = client.agents.api_keys.with_raw_response.update(
+        response = client.doagents.api_keys.with_raw_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -112,7 +112,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update(self, client: GradientAI) -> None:
-        with client.agents.api_keys.with_streaming_response.update(
+        with client.doagents.api_keys.with_streaming_response.update(
             path_api_key_uuid="api_key_uuid",
             path_agent_uuid="agent_uuid",
         ) as response:
@@ -128,13 +128,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_update(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.update(
+            client.doagents.api_keys.with_raw_response.update(
                 path_api_key_uuid="api_key_uuid",
                 path_agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.update(
+            client.doagents.api_keys.with_raw_response.update(
                 path_api_key_uuid="",
                 path_agent_uuid="agent_uuid",
             )
@@ -142,7 +142,7 @@ def test_path_params_update(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.list(
+        api_key = client.doagents.api_keys.list(
             agent_uuid="agent_uuid",
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])
@@ -150,7 +150,7 @@ def test_method_list(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_list_with_all_params(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.list(
+        api_key = client.doagents.api_keys.list(
             agent_uuid="agent_uuid",
             page=0,
             per_page=0,
@@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: GradientAI) -> None:
-        response = client.agents.api_keys.with_raw_response.list(
+        response = client.doagents.api_keys.with_raw_response.list(
             agent_uuid="agent_uuid",
         )
 
@@ -172,7 +172,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_list(self, client: GradientAI) -> None:
-        with client.agents.api_keys.with_streaming_response.list(
+        with client.doagents.api_keys.with_streaming_response.list(
             agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -187,14 +187,14 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_list(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.list(
+            client.doagents.api_keys.with_raw_response.list(
                 agent_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_delete(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.delete(
+        api_key = client.doagents.api_keys.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_delete(self, client: GradientAI) -> None:
-        response = client.agents.api_keys.with_raw_response.delete(
+        response = client.doagents.api_keys.with_raw_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_delete(self, client: GradientAI) -> None:
-        with client.agents.api_keys.with_streaming_response.delete(
+        with client.doagents.api_keys.with_streaming_response.delete(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -232,13 +232,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_delete(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.delete(
+            client.doagents.api_keys.with_raw_response.delete(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.delete(
+            client.doagents.api_keys.with_raw_response.delete(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
@@ -246,7 +246,7 @@ def test_path_params_delete(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_regenerate(self, client: GradientAI) -> None:
-        api_key = client.agents.api_keys.regenerate(
+        api_key = client.doagents.api_keys.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -255,7 +255,7 @@ def test_method_regenerate(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_regenerate(self, client: GradientAI) -> None:
-        response = client.agents.api_keys.with_raw_response.regenerate(
+        response = client.doagents.api_keys.with_raw_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         )
@@ -268,7 +268,7 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_regenerate(self, client: GradientAI) -> None:
-        with client.agents.api_keys.with_streaming_response.regenerate(
+        with client.doagents.api_keys.with_streaming_response.regenerate(
             api_key_uuid="api_key_uuid",
             agent_uuid="agent_uuid",
         ) as response:
@@ -284,13 +284,13 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_regenerate(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.regenerate(
+            client.doagents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="api_key_uuid",
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
-            client.agents.api_keys.with_raw_response.regenerate(
+            client.doagents.api_keys.with_raw_response.regenerate(
                 api_key_uuid="",
                 agent_uuid="agent_uuid",
             )
@@ -302,7 +302,7 @@ class TestAsyncAPIKeys:
@pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.create( + api_key = await async_client.doagents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -310,7 +310,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.create( + api_key = await async_client.doagents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -320,7 +320,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.create( + response = await async_client.doagents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.create( + async with async_client.doagents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -347,14 +347,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.create( + await async_client.doagents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.update( + api_key = await async_client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.update( + api_key = await async_client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -375,7 +375,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.update( + response = await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.update( + async with async_client.doagents.api_keys.with_streaming_response.update( 
path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -404,13 +404,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( + await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( + await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -418,7 +418,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.list( + api_key = await async_client.doagents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -426,7 +426,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.list( + api_key = await async_client.doagents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -436,7 +436,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.list( + response = await async_client.doagents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -448,7 +448,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.list( + async with async_client.doagents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -463,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.list( + await async_client.doagents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.delete( + api_key = await async_client.doagents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -479,7 +479,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.delete( + response = await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", 
agent_uuid="agent_uuid", ) @@ -492,7 +492,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.delete( + async with async_client.doagents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -508,13 +508,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( + await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( + await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -522,7 +522,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.regenerate( + api_key = await async_client.doagents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -531,7 +531,7 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.regenerate( + response = await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -544,7 +544,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.regenerate( + async with async_client.doagents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -560,13 +560,13 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI @parametrize async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( + await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( + await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/doagents/test_child_agents.py similarity index 84% rename from tests/api_resources/agents/test_child_agents.py rename to tests/api_resources/doagents/test_child_agents.py index 14af3b93..8e0eb0a0 100644 --- 
a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/doagents/test_child_agents.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( ChildAgentAddResponse, ChildAgentViewResponse, ChildAgentDeleteResponse, @@ -25,7 +25,7 @@ class TestChildAgents: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + child_agent = client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -34,7 +34,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + child_agent = client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -48,7 +48,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.update( + response = client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -61,7 +61,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.update( + with client.doagents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.update( + client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.update( + client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -93,7 +93,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.delete( + child_agent = client.doagents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -102,7 +102,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.delete( + response = client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -115,7 +115,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with 
client.agents.child_agents.with_streaming_response.delete( + with client.doagents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,7 +145,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + child_agent = client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -154,7 +154,7 @@ def test_method_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + child_agent = client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -167,7 +167,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_add(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.add( + response = client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -180,7 +180,7 @@ def test_raw_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.add( + with client.doagents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.add( + client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.add( + client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,7 +212,7 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.view( + child_agent = client.doagents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -220,7 
+220,7 @@ def test_method_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.view( + response = client.doagents.child_agents.with_raw_response.view( "uuid", ) @@ -232,7 +232,7 @@ def test_raw_response_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.view( + with client.doagents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @parametrize def test_path_params_view(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.child_agents.with_raw_response.view( + client.doagents.child_agents.with_raw_response.view( "", ) @@ -258,7 +258,7 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + child_agent = await async_client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -267,7 +267,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + child_agent = await async_client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -281,7 +281,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.update( + response = await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -294,7 +294,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.update( + async with async_client.doagents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -312,13 +312,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="", 
                 path_parent_agent_uuid="parent_agent_uuid",
             )
@@ -326,7 +326,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
-        child_agent = await async_client.agents.child_agents.delete(
+        child_agent = await async_client.doagents.child_agents.delete(
             child_agent_uuid="child_agent_uuid",
             parent_agent_uuid="parent_agent_uuid",
         )
@@ -335,7 +335,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.delete(
+        response = await async_client.doagents.child_agents.with_raw_response.delete(
             child_agent_uuid="child_agent_uuid",
             parent_agent_uuid="parent_agent_uuid",
         )
@@ -348,7 +348,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.delete(
+        async with async_client.doagents.child_agents.with_streaming_response.delete(
             child_agent_uuid="child_agent_uuid",
             parent_agent_uuid="parent_agent_uuid",
         ) as response:
@@ -364,13 +364,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.delete(
+            await async_client.doagents.child_agents.with_raw_response.delete(
                 child_agent_uuid="child_agent_uuid",
                 parent_agent_uuid="",
             )

         with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.delete(
+            await async_client.doagents.child_agents.with_raw_response.delete(
                 child_agent_uuid="",
                 parent_agent_uuid="parent_agent_uuid",
             )
@@ -378,7 +378,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_method_add(self, async_client: AsyncGradientAI) -> None:
-        child_agent = await async_client.agents.child_agents.add(
+        child_agent = await async_client.doagents.child_agents.add(
             path_child_agent_uuid="child_agent_uuid",
             path_parent_agent_uuid="parent_agent_uuid",
         )
@@ -387,7 +387,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None:
-        child_agent = await async_client.agents.child_agents.add(
+        child_agent = await async_client.doagents.child_agents.add(
             path_child_agent_uuid="child_agent_uuid",
             path_parent_agent_uuid="parent_agent_uuid",
             body_child_agent_uuid="child_agent_uuid",
@@ -400,7 +400,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.add(
+        response = await async_client.doagents.child_agents.with_raw_response.add(
             path_child_agent_uuid="child_agent_uuid",
             path_parent_agent_uuid="parent_agent_uuid",
         )
@@ -413,7 +413,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.add(
+        async with async_client.doagents.child_agents.with_streaming_response.add(
             path_child_agent_uuid="child_agent_uuid",
             path_parent_agent_uuid="parent_agent_uuid",
         ) as response:
@@ -431,13 +431,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
         ):
-            await async_client.agents.child_agents.with_raw_response.add(
+            await async_client.doagents.child_agents.with_raw_response.add(
                 path_child_agent_uuid="child_agent_uuid",
                 path_parent_agent_uuid="",
             )

         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.add(
+            await async_client.doagents.child_agents.with_raw_response.add(
                 path_child_agent_uuid="",
                 path_parent_agent_uuid="parent_agent_uuid",
             )
@@ -445,7 +445,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_method_view(self, async_client: AsyncGradientAI) -> None:
-        child_agent = await async_client.agents.child_agents.view(
+        child_agent = await async_client.doagents.child_agents.view(
             "uuid",
         )
         assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
@@ -453,7 +453,7 @@ async def test_method_view(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.agents.child_agents.with_raw_response.view(
+        response = await async_client.doagents.child_agents.with_raw_response.view(
             "uuid",
         )
@@ -465,7 +465,7 @@ async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.agents.child_agents.with_streaming_response.view(
+        async with async_client.doagents.child_agents.with_streaming_response.view(
             "uuid",
         ) as response:
             assert not response.is_closed
@@ -480,6 +480,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N
     @parametrize
     async def test_path_params_view(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.agents.child_agents.with_raw_response.view(
+            await async_client.doagents.child_agents.with_raw_response.view(
                 "",
             )
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/doagents/test_functions.py
similarity index 85%
rename from tests/api_resources/agents/test_functions.py
rename to tests/api_resources/doagents/test_functions.py
index bfb05fa6..11c76719 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/doagents/test_functions.py
@@ -9,7 +9,7 @@

 from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from gradientai.types.doagents import (
     FunctionCreateResponse,
     FunctionDeleteResponse,
     FunctionUpdateResponse,
@@ -24,7 +24,7 @@ class TestFunctions:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: GradientAI) -> None:
-        function = client.agents.functions.create(
+        function = client.doagents.functions.create(
             path_agent_uuid="agent_uuid",
         )
         assert_matches_type(FunctionCreateResponse, function, path=["response"])
@@ -32,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: GradientAI) -> None:
-        function = client.agents.functions.create(
+        function = client.doagents.functions.create(
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
             description="description",
@@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: GradientAI) -> None:
-        response = client.agents.functions.with_raw_response.create(
+        response = client.doagents.functions.with_raw_response.create(
             path_agent_uuid="agent_uuid",
         )
@@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: GradientAI) -> None:
-        with client.agents.functions.with_streaming_response.create(
+        with client.doagents.functions.with_streaming_response.create(
             path_agent_uuid="agent_uuid",
         ) as response:
             assert not response.is_closed
@@ -74,14 +74,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_create(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.functions.with_raw_response.create(
+            client.doagents.functions.with_raw_response.create(
                 path_agent_uuid="",
             )

     @pytest.mark.skip()
     @parametrize
     def test_method_update(self, client: GradientAI) -> None:
-        function = client.agents.functions.update(
+        function = client.doagents.functions.update(
             path_function_uuid="function_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -90,7 +90,7 @@ def test_method_update(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_update_with_all_params(self, client: GradientAI) -> None:
-        function = client.agents.functions.update(
+        function = client.doagents.functions.update(
             path_function_uuid="function_uuid",
             path_agent_uuid="agent_uuid",
             body_agent_uuid="agent_uuid",
@@ -107,7 +107,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update(self, client: GradientAI) -> None:
-        response = client.agents.functions.with_raw_response.update(
+        response = client.doagents.functions.with_raw_response.update(
             path_function_uuid="function_uuid",
             path_agent_uuid="agent_uuid",
         )
@@ -120,7 +120,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update(self, client: GradientAI) -> None:
-        with client.agents.functions.with_streaming_response.update(
+        with client.doagents.functions.with_streaming_response.update(
             path_function_uuid="function_uuid",
             path_agent_uuid="agent_uuid",
         ) as response:
@@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_update(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
-            client.agents.functions.with_raw_response.update(
+            client.doagents.functions.with_raw_response.update(
                 path_function_uuid="function_uuid",
path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.agents.functions.with_raw_response.update( + client.doagents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -150,7 +150,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - function = client.agents.functions.delete( + function = client.doagents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.functions.with_raw_response.delete( + response = client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.functions.with_streaming_response.delete( + with client.doagents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -188,13 +188,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( + client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( + client.doagents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) @@ -206,7 +206,7 @@ class TestAsyncFunctions: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.create( + function = await async_client.doagents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -214,7 +214,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.create( + function = await async_client.doagents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -229,7 +229,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.create( + response = await async_client.doagents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -241,7 +241,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.create( + async with 
async_client.doagents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -256,14 +256,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.create( + await async_client.doagents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.update( + function = await async_client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -272,7 +272,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.update( + function = await async_client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -289,7 +289,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.update( + response = await async_client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.update( + async with async_client.doagents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -318,13 +318,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( + await async_client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( + await async_client.doagents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.delete( + function = await async_client.doagents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -341,7 +341,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, 
async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.delete( + response = await async_client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -354,7 +354,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.delete( + async with async_client.doagents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -370,13 +370,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( + await async_client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( + await async_client.doagents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/doagents/test_knowledge_bases.py similarity index 82% rename from tests/api_resources/agents/test_knowledge_bases.py rename to tests/api_resources/doagents/test_knowledge_bases.py index dff80a9a..f077caaa 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/doagents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -20,7 +20,7 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_attach(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.attach( + knowledge_base = client.doagents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -28,7 +28,7 @@ def test_method_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach( + response = client.doagents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -40,7 +40,7 @@ def test_raw_response_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach( + with client.doagents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -55,14 +55,14 @@ def test_streaming_response_attach(self, client: GradientAI) -> None: @parametrize def test_path_params_attach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but 
received ''"): - client.agents.knowledge_bases.with_raw_response.attach( + client.doagents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize def test_method_attach_single(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.attach_single( + knowledge_base = client.doagents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -71,7 +71,7 @@ def test_method_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach_single(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach_single( + response = client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach_single(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach_single( + with client.doagents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -100,13 +100,13 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: @parametrize def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( + client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( + client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -114,7 +114,7 @@ def test_path_params_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.detach( + knowledge_base = client.doagents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -123,7 +123,7 @@ def test_method_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.detach( + response = client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -136,7 +136,7 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.detach( + with client.doagents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -152,13 +152,13 @@ def test_streaming_response_detach(self, client: GradientAI) -> None: @parametrize def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( + 
client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( + client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -170,7 +170,7 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_attach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach( + knowledge_base = await async_client.doagents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -178,7 +178,7 @@ async def test_method_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach( + response = await async_client.doagents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -190,7 +190,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach( + async with async_client.doagents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -205,14 +205,14 @@ async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach( + await async_client.doagents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach_single( + knowledge_base = await async_client.doagents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -221,7 +221,7 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( + response = await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -234,7 +234,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( + async with async_client.doagents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -250,13 +250,13 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien @parametrize async def test_path_params_attach_single(self, async_client: 
AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -264,7 +264,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.detach( + knowledge_base = await async_client.doagents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -273,7 +273,7 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.detach( + response = await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -286,7 +286,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.detach( + async with async_client.doagents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -302,13 +302,13 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( + await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( + await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/doagents/test_versions.py similarity index 84% rename from tests/api_resources/agents/test_versions.py rename to tests/api_resources/doagents/test_versions.py index 77fee4c6..ec5e293d 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/doagents/test_versions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( VersionListResponse, VersionUpdateResponse, ) @@ -23,7 +23,7 @@ class TestVersions: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - version = 
client.agents.versions.update( + version = client.doagents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -31,7 +31,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - version = client.agents.versions.update( + version = client.doagents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -41,7 +41,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.versions.with_raw_response.update( + response = client.doagents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -53,7 +53,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.versions.with_streaming_response.update( + with client.doagents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.versions.with_raw_response.update( + client.doagents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - version = client.agents.versions.list( + version = client.doagents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -83,7 +83,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - version = client.agents.versions.list( + version = client.doagents.versions.list( uuid="uuid", page=0, per_page=0, @@ -93,7 +93,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", ) @@ -105,7 +105,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.versions.with_streaming_response.list( + with client.doagents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -120,7 +120,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.versions.with_raw_response.list( + client.doagents.versions.with_raw_response.list( uuid="", ) @@ -131,7 +131,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.update( + version = await async_client.doagents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, 
path=["response"]) @@ -139,7 +139,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.update( + version = await async_client.doagents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -149,7 +149,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.versions.with_raw_response.update( + response = await async_client.doagents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -161,7 +161,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.versions.with_streaming_response.update( + async with async_client.doagents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -176,14 +176,14 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.versions.with_raw_response.update( + await async_client.doagents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.list( + version = await async_client.doagents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.list( + version = await async_client.doagents.versions.list( uuid="uuid", page=0, per_page=0, @@ -201,7 +201,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.versions.with_raw_response.list( + response = await async_client.doagents.versions.with_raw_response.list( uuid="uuid", ) @@ -213,7 +213,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.versions.with_streaming_response.list( + async with async_client.doagents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -228,6 +228,6 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.versions.with_raw_response.list( + await async_client.doagents.versions.with_raw_response.list( uuid="", ) diff 
--git a/tests/api_resources/test_agents.py b/tests/api_resources/test_doagents.py similarity index 66% rename from tests/api_resources/test_agents.py rename to tests/api_resources/test_doagents.py index f39ac4d5..9a8c5c91 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_doagents.py @@ -10,30 +10,30 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type from gradientai.types import ( - AgentListResponse, - AgentCreateResponse, - AgentDeleteResponse, - AgentUpdateResponse, - AgentRetrieveResponse, - AgentUpdateStatusResponse, + DoagentListResponse, + DoagentCreateResponse, + DoagentDeleteResponse, + DoagentUpdateResponse, + DoagentRetrieveResponse, + DoagentUpdateStatusResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestAgents: +class TestDoagents: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - agent = client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = client.doagents.create() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.create( + doagent = client.doagents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -45,61 +45,61 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: region="region", tags=["string"], ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.create() + response = client.doagents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.create() as response: + with client.doagents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - agent = client.agents.retrieve( + doagent = client.doagents.retrieve( "uuid", ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.retrieve( + response = client.doagents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.retrieve( + with client.doagents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -107,22 +107,22 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.retrieve( + client.doagents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - agent = client.agents.update( + doagent = client.doagents.update( path_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.update( + doagent = client.doagents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -140,31 +140,31 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: top_p=0, body_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.update( + response = client.doagents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.update( + with client.doagents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -172,79 +172,79 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.with_raw_response.update( + client.doagents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def 
test_method_list(self, client: GradientAI) -> None: - agent = client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = client.doagents.list() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.list( + doagent = client.doagents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(AgentListResponse, agent, path=["response"]) + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.list() + response = client.doagents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.list() as response: + with client.doagents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - agent = client.agents.delete( + doagent = client.doagents.delete( "uuid", ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.delete( + response = client.doagents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.delete( + with client.doagents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -252,51 +252,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.delete( + client.doagents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_status(self, client: GradientAI) -> None: - agent 
= client.agents.update_status( + doagent = client.doagents.update_status( path_uuid="uuid", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.update_status( + doagent = client.doagents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.update_status( + response = client.doagents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.update_status( + with client.doagents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -304,24 +304,24 @@ def test_streaming_response_update_status(self, client: GradientAI) -> None: @parametrize def test_path_params_update_status(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.with_raw_response.update_status( + client.doagents.with_raw_response.update_status( path_uuid="", ) -class TestAsyncAgents: +class TestAsyncDoagents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await async_client.doagents.create() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.create( + doagent = await async_client.doagents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -333,61 +333,61 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI region="region", tags=["string"], ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.create() + response = await 
async_client.doagents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.create() as response: + async with async_client.doagents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.retrieve( + doagent = await async_client.doagents.retrieve( "uuid", ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.retrieve( + response = await async_client.doagents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.retrieve( + async with async_client.doagents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -395,22 +395,22 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.retrieve( + await async_client.doagents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update( + doagent = await async_client.doagents.update( path_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update( + doagent = await 
async_client.doagents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -428,31 +428,31 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI top_p=0, body_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.update( + response = await async_client.doagents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.update( + async with async_client.doagents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -460,79 +460,79 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update( + await async_client.doagents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await async_client.doagents.list() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.list( + doagent = await async_client.doagents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(AgentListResponse, agent, path=["response"]) + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.list() + response = await async_client.doagents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.list() as response: + async with 
async_client.doagents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.delete( + doagent = await async_client.doagents.delete( "uuid", ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.delete( + response = await async_client.doagents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.delete( + async with async_client.doagents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -540,51 +540,51 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.delete( + await async_client.doagents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update_status( + doagent = await async_client.doagents.update_status( path_uuid="uuid", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update_status( + doagent = await async_client.doagents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.update_status( + response = await async_client.doagents.with_raw_response.update_status( path_uuid="uuid", ) assert 
response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.update_status( + async with async_client.doagents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -592,6 +592,6 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien @parametrize async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update_status( + await async_client.doagents.with_raw_response.update_status( path_uuid="", ) diff --git a/tests/test_client.py b/tests/test_client.py index d83082e3..4a26cbd0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -724,7 +724,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @@ -734,7 +734,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -763,7 +763,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list(uuid="uuid") + response = client.doagents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -787,7 +787,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -812,7 +812,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = 
client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) @@ -1544,7 +1544,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @@ -1556,7 +1556,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1586,7 +1586,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list(uuid="uuid") + response = await client.doagents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1611,7 +1611,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list( + response = await client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -1637,7 +1637,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list( + response = await client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) From 955623b09319757784e479f0403656aad50e48d4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 14:17:51 +0000 Subject: [PATCH 047/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 16 +- api.md | 70 ++--- src/gradientai/_client.py | 40 +-- src/gradientai/resources/__init__.py | 28 +- .../{doagents => agents}/__init__.py | 28 +- .../doagents.py => agents/agents.py} | 218 +++++++-------- .../{doagents => agents}/api_keys.py | 12 +- .../{doagents => agents}/child_agents.py | 10 +- .../{doagents => agents}/functions.py | 8 +- .../{doagents => agents}/knowledge_bases.py | 4 +- .../{doagents => agents}/versions.py | 6 +- src/gradientai/types/__init__.py | 20 +- ...reate_params.py => agent_create_params.py} | 4 +- ...e_response.py => agent_create_response.py} | 4 +- ...e_response.py => agent_delete_response.py} | 4 +- ...nt_list_params.py => agent_list_params.py} | 4 +- ...ist_response.py => agent_list_response.py} | 8 +- ...response.py => agent_retrieve_response.py} | 4 +- ...pdate_params.py => agent_update_params.py} | 4 +- ...e_response.py => agent_update_response.py} | 4 +- ...arams.py => 
agent_update_status_params.py} | 4 +- .../types/agent_update_status_response.py | 16 ++ .../types/{doagents => agents}/__init__.py | 0 .../api_key_create_params.py | 0 .../api_key_create_response.py | 0 .../api_key_delete_response.py | 0 .../api_key_list_params.py | 0 .../api_key_list_response.py | 0 .../api_key_regenerate_response.py | 0 .../api_key_update_params.py | 0 .../api_key_update_response.py | 0 .../api_link_knowledge_base_output.py | 0 .../types/{doagents => agents}/api_links.py | 0 .../types/{doagents => agents}/api_meta.py | 0 .../child_agent_add_params.py | 0 .../child_agent_add_response.py | 0 .../child_agent_delete_response.py | 0 .../child_agent_update_params.py | 0 .../child_agent_update_response.py | 0 .../child_agent_view_response.py | 0 .../function_create_params.py | 0 .../function_create_response.py | 0 .../function_delete_response.py | 0 .../function_update_params.py | 0 .../function_update_response.py | 0 .../knowledge_base_detach_response.py | 0 .../version_list_params.py | 0 .../version_list_response.py | 0 .../version_update_params.py | 0 .../version_update_response.py | 0 .../types/doagent_update_status_response.py | 16 -- .../types/indexing_job_list_response.py | 4 +- .../types/inference/api_key_list_response.py | 4 +- .../types/knowledge_base_list_response.py | 4 +- .../data_source_list_response.py | 4 +- src/gradientai/types/model_list_response.py | 4 +- .../anthropic/key_list_agents_response.py | 4 +- .../providers/anthropic/key_list_response.py | 4 +- .../providers/openai/key_list_response.py | 4 +- .../openai/key_retrieve_agents_response.py | 4 +- .../{doagents => agents}/__init__.py | 0 .../{doagents => agents}/test_api_keys.py | 106 ++++---- .../{doagents => agents}/test_child_agents.py | 86 +++--- .../{doagents => agents}/test_functions.py | 66 ++--- .../test_knowledge_bases.py | 58 ++-- .../{doagents => agents}/test_versions.py | 42 +-- .../{test_doagents.py => test_agents.py} | 256 +++++++++--------- tests/test_client.py | 20 +- 69 files changed, 602 insertions(+), 602 deletions(-) rename src/gradientai/resources/{doagents => agents}/__init__.py (84%) rename src/gradientai/resources/{doagents/doagents.py => agents/agents.py} (87%) rename src/gradientai/resources/{doagents => agents}/api_keys.py (98%) rename src/gradientai/resources/{doagents => agents}/child_agents.py (98%) rename src/gradientai/resources/{doagents => agents}/functions.py (98%) rename src/gradientai/resources/{doagents => agents}/knowledge_bases.py (98%) rename src/gradientai/resources/{doagents => agents}/versions.py (98%) rename src/gradientai/types/{doagent_create_params.py => agent_create_params.py} (90%) rename src/gradientai/types/{doagent_update_response.py => agent_create_response.py} (77%) rename src/gradientai/types/{doagent_create_response.py => agent_delete_response.py} (77%) rename src/gradientai/types/{doagent_list_params.py => agent_list_params.py} (79%) rename src/gradientai/types/{doagent_list_response.py => agent_list_response.py} (98%) rename src/gradientai/types/{doagent_delete_response.py => agent_retrieve_response.py} (77%) rename src/gradientai/types/{doagent_update_params.py => agent_update_params.py} (95%) rename src/gradientai/types/{doagent_retrieve_response.py => agent_update_response.py} (76%) rename src/gradientai/types/{doagent_update_status_params.py => agent_update_status_params.py} (79%) create mode 100644 src/gradientai/types/agent_update_status_response.py rename src/gradientai/types/{doagents => agents}/__init__.py (100%) rename 
src/gradientai/types/{doagents => agents}/api_key_create_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_create_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_list_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_list_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_regenerate_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_link_knowledge_base_output.py (100%) rename src/gradientai/types/{doagents => agents}/api_links.py (100%) rename src/gradientai/types/{doagents => agents}/api_meta.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_add_params.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_add_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_view_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_create_params.py (100%) rename src/gradientai/types/{doagents => agents}/function_create_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/function_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/knowledge_base_detach_response.py (100%) rename src/gradientai/types/{doagents => agents}/version_list_params.py (100%) rename src/gradientai/types/{doagents => agents}/version_list_response.py (100%) rename src/gradientai/types/{doagents => agents}/version_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/version_update_response.py (100%) delete mode 100644 src/gradientai/types/doagent_update_status_response.py rename tests/api_resources/{doagents => agents}/__init__.py (100%) rename tests/api_resources/{doagents => agents}/test_api_keys.py (84%) rename tests/api_resources/{doagents => agents}/test_child_agents.py (84%) rename tests/api_resources/{doagents => agents}/test_functions.py (85%) rename tests/api_resources/{doagents => agents}/test_knowledge_bases.py (82%) rename tests/api_resources/{doagents => agents}/test_versions.py (84%) rename tests/api_resources/{test_doagents.py => test_agents.py} (66%) diff --git a/.stats.yml b/.stats.yml index 0e1ae316..8f85d58c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: c424a9395cc2b0dbf298813e54562194 +config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 diff --git a/README.md b/README.md index d047f658..36edcfbd 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ client = GradientAI( ), # This is the default and can be omitted ) -versions = client.doagents.versions.list( +versions = client.agents.versions.list( 
uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -61,7 +61,7 @@ client = AsyncGradientAI( async def main() -> None: - versions = await client.doagents.versions.list( + versions = await client.agents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -113,7 +113,7 @@ from gradientai import GradientAI client = GradientAI() try: - client.doagents.versions.list( + client.agents.versions.list( uuid="REPLACE_ME", ) except gradientai.APIConnectionError as e: @@ -158,7 +158,7 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).doagents.versions.list( +client.with_options(max_retries=5).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -183,7 +183,7 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).doagents.versions.list( +client.with_options(timeout=5.0).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -226,12 +226,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.doagents.versions.with_raw_response.list( +response = client.agents.versions.with_raw_response.list( uuid="REPLACE_ME", ) print(response.headers.get('X-My-Header')) -version = response.parse() # get the object that `doagents.versions.list()` would have returned +version = response.parse() # get the object that `agents.versions.list()` would have returned print(version.agent_versions) ``` @@ -246,7 +246,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
```python -with client.doagents.versions.with_streaming_response.list( +with client.agents.versions.with_streaming_response.list( uuid="REPLACE_ME", ) as response: print(response.headers.get("X-My-Header")) diff --git a/api.md b/api.md index 0bc41bbe..2376a11f 100644 --- a/api.md +++ b/api.md @@ -1,4 +1,4 @@ -# Doagents +# Agents Types: @@ -10,30 +10,30 @@ from gradientai.types import ( APIDeploymentVisibility, APIOpenAIAPIKeyInfo, APIRetrievalMethod, - DoagentCreateResponse, - DoagentRetrieveResponse, - DoagentUpdateResponse, - DoagentListResponse, - DoagentDeleteResponse, - DoagentUpdateStatusResponse, + AgentCreateResponse, + AgentRetrieveResponse, + AgentUpdateResponse, + AgentListResponse, + AgentDeleteResponse, + AgentUpdateStatusResponse, ) ``` Methods: -- client.doagents.create(\*\*params) -> DoagentCreateResponse -- client.doagents.retrieve(uuid) -> DoagentRetrieveResponse -- client.doagents.update(path_uuid, \*\*params) -> DoagentUpdateResponse -- client.doagents.list(\*\*params) -> DoagentListResponse -- client.doagents.delete(uuid) -> DoagentDeleteResponse -- client.doagents.update_status(path_uuid, \*\*params) -> DoagentUpdateStatusResponse +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -44,18 +44,18 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.doagents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.doagents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.doagents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.doagents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Functions Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -64,43 +64,43 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.doagents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.doagents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## 
Versions Types: ```python -from gradientai.types.doagents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse +from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse ``` Methods: -- client.doagents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.doagents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.doagents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.doagents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.doagents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## ChildAgents Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( ChildAgentUpdateResponse, ChildAgentDeleteResponse, ChildAgentAddResponse, @@ -110,10 +110,10 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.doagents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.doagents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.doagents.child_agents.view(uuid) -> ChildAgentViewResponse +- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse # Providers diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 992559a2..0a5eb9a1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,12 +31,12 @@ ) if TYPE_CHECKING: - from .resources import chat, models, regions, doagents, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource + from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.doagents.doagents import DoagentsResource, AsyncDoagentsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, 
AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -110,10 +110,10 @@ def __init__( ) @cached_property - def doagents(self) -> DoagentsResource: - from .resources.doagents import DoagentsResource + def agents(self) -> AgentsResource: + from .resources.agents import AgentsResource - return DoagentsResource(self) + return AgentsResource(self) @cached_property def providers(self) -> ProvidersResource: @@ -329,10 +329,10 @@ def __init__( ) @cached_property - def doagents(self) -> AsyncDoagentsResource: - from .resources.doagents import AsyncDoagentsResource + def agents(self) -> AsyncAgentsResource: + from .resources.agents import AsyncAgentsResource - return AsyncDoagentsResource(self) + return AsyncAgentsResource(self) @cached_property def providers(self) -> AsyncProvidersResource: @@ -498,10 +498,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.DoagentsResourceWithRawResponse: - from .resources.doagents import DoagentsResourceWithRawResponse + def agents(self) -> agents.AgentsResourceWithRawResponse: + from .resources.agents import AgentsResourceWithRawResponse - return DoagentsResourceWithRawResponse(self._client.doagents) + return AgentsResourceWithRawResponse(self._client.agents) @cached_property def providers(self) -> providers.ProvidersResourceWithRawResponse: @@ -553,10 +553,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.AsyncDoagentsResourceWithRawResponse: - from .resources.doagents import AsyncDoagentsResourceWithRawResponse + def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: + from .resources.agents import AsyncAgentsResourceWithRawResponse - return AsyncDoagentsResourceWithRawResponse(self._client.doagents) + return AsyncAgentsResourceWithRawResponse(self._client.agents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: @@ -608,10 +608,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.DoagentsResourceWithStreamingResponse: - from .resources.doagents import DoagentsResourceWithStreamingResponse + def agents(self) -> agents.AgentsResourceWithStreamingResponse: + from .resources.agents import AgentsResourceWithStreamingResponse - return DoagentsResourceWithStreamingResponse(self._client.doagents) + return AgentsResourceWithStreamingResponse(self._client.agents) @cached_property def providers(self) -> providers.ProvidersResourceWithStreamingResponse: @@ -663,10 +663,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.AsyncDoagentsResourceWithStreamingResponse: - from .resources.doagents import AsyncDoagentsResourceWithStreamingResponse + def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: + from .resources.agents import AsyncAgentsResourceWithStreamingResponse - return AsyncDoagentsResourceWithStreamingResponse(self._client.doagents) + return AsyncAgentsResourceWithStreamingResponse(self._client.agents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 17791967..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,6 +8,14 @@ 
ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) from .models import ( ModelsResource, AsyncModelsResource, @@ -24,14 +32,6 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) -from .doagents import ( - DoagentsResource, - AsyncDoagentsResource, - DoagentsResourceWithRawResponse, - AsyncDoagentsResourceWithRawResponse, - DoagentsResourceWithStreamingResponse, - AsyncDoagentsResourceWithStreamingResponse, -) from .inference import ( InferenceResource, AsyncInferenceResource, @@ -66,12 +66,12 @@ ) __all__ = [ - "DoagentsResource", - "AsyncDoagentsResource", - "DoagentsResourceWithRawResponse", - "AsyncDoagentsResourceWithRawResponse", - "DoagentsResourceWithStreamingResponse", - "AsyncDoagentsResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", "ProvidersResource", "AsyncProvidersResource", "ProvidersResourceWithRawResponse", diff --git a/src/gradientai/resources/doagents/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 84% rename from src/gradientai/resources/doagents/__init__.py rename to src/gradientai/resources/agents/__init__.py index 5ee3485f..f41a0408 100644 --- a/src/gradientai/resources/doagents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -8,14 +16,6 @@ APIKeysResourceWithStreamingResponse, AsyncAPIKeysResourceWithStreamingResponse, ) -from .doagents import ( - DoagentsResource, - AsyncDoagentsResource, - DoagentsResourceWithRawResponse, - AsyncDoagentsResourceWithRawResponse, - DoagentsResourceWithStreamingResponse, - AsyncDoagentsResourceWithStreamingResponse, -) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -80,10 +80,10 @@ "AsyncChildAgentsResourceWithRawResponse", "ChildAgentsResourceWithStreamingResponse", "AsyncChildAgentsResourceWithStreamingResponse", - "DoagentsResource", - "AsyncDoagentsResource", - "DoagentsResourceWithRawResponse", - "AsyncDoagentsResourceWithRawResponse", - "DoagentsResourceWithStreamingResponse", - "AsyncDoagentsResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/doagents/doagents.py b/src/gradientai/resources/agents/agents.py similarity index 87% rename from src/gradientai/resources/doagents/doagents.py rename to src/gradientai/resources/agents/agents.py index 89951704..78439d33 100644 --- a/src/gradientai/resources/doagents/doagents.py +++ b/src/gradientai/resources/agents/agents.py @@ -9,10 +9,10 @@ from ...types import ( APIRetrievalMethod, APIDeploymentVisibility, - doagent_list_params, - doagent_create_params, - doagent_update_params, - doagent_update_status_params, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, ) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform @@ -65,19 +65,19 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) +from ...types.agent_list_response import AgentListResponse from ...types.api_retrieval_method import APIRetrievalMethod -from ...types.doagent_list_response import DoagentListResponse -from ...types.doagent_create_response import DoagentCreateResponse -from ...types.doagent_delete_response import DoagentDeleteResponse -from ...types.doagent_update_response import DoagentUpdateResponse +from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_delete_response import AgentDeleteResponse +from ...types.agent_update_response import AgentUpdateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse from ...types.api_deployment_visibility import APIDeploymentVisibility -from ...types.doagent_retrieve_response import DoagentRetrieveResponse -from ...types.doagent_update_status_response import DoagentUpdateStatusResponse +from ...types.agent_update_status_response import AgentUpdateStatusResponse -__all__ = ["DoagentsResource", "AsyncDoagentsResource"] +__all__ = ["AgentsResource", "AsyncAgentsResource"] -class DoagentsResource(SyncAPIResource): +class AgentsResource(SyncAPIResource): @cached_property def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) @@ -99,23 +99,23 @@ def child_agents(self) -> ChildAgentsResource: return ChildAgentsResource(self._client) @cached_property - def 
with_raw_response(self) -> DoagentsResourceWithRawResponse: + def with_raw_response(self) -> AgentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return DoagentsResourceWithRawResponse(self) + return AgentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> DoagentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return DoagentsResourceWithStreamingResponse(self) + return AgentsResourceWithStreamingResponse(self) def create( self, @@ -136,7 +136,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentCreateResponse: + ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. The response @@ -175,12 +175,12 @@ def create( "region": region, "tags": tags, }, - doagent_create_params.DoagentCreateParams, + agent_create_params.AgentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentCreateResponse, + cast_to=AgentCreateResponse, ) def retrieve( @@ -193,7 +193,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentRetrieveResponse: + ) -> AgentRetrieveResponse: """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -217,7 +217,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentRetrieveResponse, + cast_to=AgentRetrieveResponse, ) def update( @@ -245,7 +245,7 @@ def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateResponse: + ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. The @@ -303,12 +303,12 @@ def update( "top_p": top_p, "body_uuid": body_uuid, }, - doagent_update_params.DoagentUpdateParams, + agent_update_params.AgentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateResponse, + cast_to=AgentUpdateResponse, ) def list( @@ -323,7 +323,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentListResponse: + ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`.
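A minimal usage sketch of the renamed `agents` resource, mirroring the README examples above; it assumes `GRADIENTAI_API_KEY` is exported in the environment (so the client needs no explicit credentials) and every argument value below is a placeholder.

```python
from gradientai import GradientAI

# Credentials are read from the environment by default, as in the README above.
client = GradientAI()

# GET /v2/gen-ai/agents — the paging values here are arbitrary placeholders.
agents = client.agents.list(
    only_deployed=True,
    page=1,
    per_page=10,
)
print(agents.agents)

# Update deployment visibility; "agent-uuid" is a placeholder and the
# visibility string is one of the APIDeploymentVisibility values.
updated = client.agents.update_status(
    path_uuid="agent-uuid",
    visibility="VISIBILITY_PUBLIC",
)
```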
@@ -357,10 +357,10 @@ def list( "page": page, "per_page": per_page, }, - doagent_list_params.DoagentListParams, + agent_list_params.AgentListParams, ), ), - cast_to=DoagentListResponse, + cast_to=AgentListResponse, ) def delete( @@ -373,7 +373,7 @@ def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentDeleteResponse: + ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -395,7 +395,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentDeleteResponse, + cast_to=AgentDeleteResponse, ) def update_status( @@ -410,7 +410,7 @@ def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateStatusResponse: + ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private. To update the agent status, send a @@ -436,16 +436,16 @@ def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - doagent_update_status_params.DoagentUpdateStatusParams, + agent_update_status_params.AgentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateStatusResponse, + cast_to=AgentUpdateStatusResponse, ) -class AsyncDoagentsResource(AsyncAPIResource): +class AsyncAgentsResource(AsyncAPIResource): @cached_property def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) @@ -467,23 +467,23 @@ def child_agents(self) -> AsyncChildAgentsResource: return AsyncChildAgentsResource(self._client) @cached_property - def with_raw_response(self) -> AsyncDoagentsResourceWithRawResponse: + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return AsyncDoagentsResourceWithRawResponse(self) + return AsyncAgentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncDoagentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return AsyncDoagentsResourceWithStreamingResponse(self) + return AsyncAgentsResourceWithStreamingResponse(self) async def create( self, @@ -504,7 +504,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentCreateResponse: + ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. 
The response @@ -543,12 +543,12 @@ async def create( "region": region, "tags": tags, }, - doagent_create_params.DoagentCreateParams, + agent_create_params.AgentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentCreateResponse, + cast_to=AgentCreateResponse, ) async def retrieve( @@ -561,7 +561,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentRetrieveResponse: + ) -> AgentRetrieveResponse: """To retrieve details of an agent, send a GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -585,7 +585,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentRetrieveResponse, + cast_to=AgentRetrieveResponse, ) async def update( @@ -613,7 +613,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateResponse: + ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. The @@ -671,12 +671,12 @@ async def update( "top_p": top_p, "body_uuid": body_uuid, }, - doagent_update_params.DoagentUpdateParams, + agent_update_params.AgentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateResponse, + cast_to=AgentUpdateResponse, ) async def list( @@ -691,7 +691,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentListResponse: + ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -725,10 +725,10 @@ async def list( "page": page, "per_page": per_page, }, - doagent_list_params.DoagentListParams, + agent_list_params.AgentListParams, ), ), - cast_to=DoagentListResponse, + cast_to=AgentListResponse, ) async def delete( @@ -741,7 +741,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentDeleteResponse: + ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -763,7 +763,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentDeleteResponse, + cast_to=AgentDeleteResponse, ) async def update_status( @@ -778,7 +778,7 @@ async def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateStatusResponse: + ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private.
To update the agent status, send a @@ -804,186 +804,186 @@ async def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - doagent_update_status_params.DoagentUpdateStatusParams, + agent_update_status_params.AgentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateStatusResponse, + cast_to=AgentUpdateStatusResponse, ) -class DoagentsResourceWithRawResponse: - def __init__(self, doagents: DoagentsResource) -> None: - self._doagents = doagents +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents self.create = to_raw_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = to_raw_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = to_raw_response_wrapper( - doagents.update, + agents.update, ) self.list = to_raw_response_wrapper( - doagents.list, + agents.list, ) self.delete = to_raw_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = to_raw_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._doagents.api_keys) + return APIKeysResourceWithRawResponse(self._agents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithRawResponse: - return FunctionsResourceWithRawResponse(self._doagents.functions) + return FunctionsResourceWithRawResponse(self._agents.functions) @cached_property def versions(self) -> VersionsResourceWithRawResponse: - return VersionsResourceWithRawResponse(self._doagents.versions) + return VersionsResourceWithRawResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) + return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return ChildAgentsResourceWithRawResponse(self._doagents.child_agents) + return ChildAgentsResourceWithRawResponse(self._agents.child_agents) -class AsyncDoagentsResourceWithRawResponse: - def __init__(self, doagents: AsyncDoagentsResource) -> None: - self._doagents = doagents +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents self.create = async_to_raw_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = async_to_raw_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = async_to_raw_response_wrapper( - doagents.update, + agents.update, ) self.list = async_to_raw_response_wrapper( - doagents.list, + agents.list, ) self.delete = async_to_raw_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = async_to_raw_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._doagents.api_keys) + return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithRawResponse: - return AsyncFunctionsResourceWithRawResponse(self._doagents.functions) + return AsyncFunctionsResourceWithRawResponse(self._agents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: - 
return AsyncVersionsResourceWithRawResponse(self._doagents.versions) + return AsyncVersionsResourceWithRawResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) + return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._doagents.child_agents) + return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) -class DoagentsResourceWithStreamingResponse: - def __init__(self, doagents: DoagentsResource) -> None: - self._doagents = doagents +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents self.create = to_streamed_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = to_streamed_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = to_streamed_response_wrapper( - doagents.update, + agents.update, ) self.list = to_streamed_response_wrapper( - doagents.list, + agents.list, ) self.delete = to_streamed_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = to_streamed_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._doagents.api_keys) + return APIKeysResourceWithStreamingResponse(self._agents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithStreamingResponse: - return FunctionsResourceWithStreamingResponse(self._doagents.functions) + return FunctionsResourceWithStreamingResponse(self._agents.functions) @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: - return VersionsResourceWithStreamingResponse(self._doagents.versions) + return VersionsResourceWithStreamingResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) + return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: - return ChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) + return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) -class AsyncDoagentsResourceWithStreamingResponse: - def __init__(self, doagents: AsyncDoagentsResource) -> None: - self._doagents = doagents +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents self.create = async_to_streamed_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = async_to_streamed_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = async_to_streamed_response_wrapper( - doagents.update, + agents.update, ) self.list = async_to_streamed_response_wrapper( - doagents.list, + agents.list, ) self.delete = async_to_streamed_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = async_to_streamed_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._doagents.api_keys) + return 
AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: - return AsyncFunctionsResourceWithStreamingResponse(self._doagents.functions) + return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: - return AsyncVersionsResourceWithStreamingResponse(self._doagents.versions) + return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) + return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/gradientai/resources/doagents/api_keys.py b/src/gradientai/resources/agents/api_keys.py similarity index 98% rename from src/gradientai/resources/doagents/api_keys.py rename to src/gradientai/resources/agents/api_keys.py index c55249be..155e3adc 100644 --- a/src/gradientai/resources/doagents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -15,12 +15,12 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.doagents.api_key_list_response import APIKeyListResponse -from ...types.doagents.api_key_create_response import APIKeyCreateResponse -from ...types.doagents.api_key_delete_response import APIKeyDeleteResponse -from ...types.doagents.api_key_update_response import APIKeyUpdateResponse -from ...types.doagents.api_key_regenerate_response import APIKeyRegenerateResponse +from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.agents.api_key_list_response import APIKeyListResponse +from ...types.agents.api_key_create_response import APIKeyCreateResponse +from ...types.agents.api_key_delete_response import APIKeyDeleteResponse +from ...types.agents.api_key_update_response import APIKeyUpdateResponse +from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/doagents/child_agents.py b/src/gradientai/resources/agents/child_agents.py similarity index 98% rename from src/gradientai/resources/doagents/child_agents.py rename to src/gradientai/resources/agents/child_agents.py index 6e8abfb7..9031d8ce 100644 --- a/src/gradientai/resources/doagents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -15,11 +15,11 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import child_agent_add_params, child_agent_update_params -from ...types.doagents.child_agent_add_response import ChildAgentAddResponse -from ...types.doagents.child_agent_view_response import ChildAgentViewResponse -from ...types.doagents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.doagents.child_agent_update_response import ChildAgentUpdateResponse +from ...types.agents import child_agent_add_params, child_agent_update_params +from 
...types.agents.child_agent_add_response import ChildAgentAddResponse
+from ...types.agents.child_agent_view_response import ChildAgentViewResponse
+from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse
+from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse

 __all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"]

diff --git a/src/gradientai/resources/doagents/functions.py b/src/gradientai/resources/agents/functions.py
similarity index 98%
rename from src/gradientai/resources/doagents/functions.py
rename to src/gradientai/resources/agents/functions.py
index 65ab2801..67a811cc 100644
--- a/src/gradientai/resources/doagents/functions.py
+++ b/src/gradientai/resources/agents/functions.py
@@ -15,10 +15,10 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.doagents import function_create_params, function_update_params
-from ...types.doagents.function_create_response import FunctionCreateResponse
-from ...types.doagents.function_delete_response import FunctionDeleteResponse
-from ...types.doagents.function_update_response import FunctionUpdateResponse
+from ...types.agents import function_create_params, function_update_params
+from ...types.agents.function_create_response import FunctionCreateResponse
+from ...types.agents.function_delete_response import FunctionDeleteResponse
+from ...types.agents.function_update_response import FunctionUpdateResponse

 __all__ = ["FunctionsResource", "AsyncFunctionsResource"]

diff --git a/src/gradientai/resources/doagents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py
similarity index 98%
rename from src/gradientai/resources/doagents/knowledge_bases.py
rename to src/gradientai/resources/agents/knowledge_bases.py
index e806d7a2..3b9b0cd2 100644
--- a/src/gradientai/resources/doagents/knowledge_bases.py
+++ b/src/gradientai/resources/agents/knowledge_bases.py
@@ -14,8 +14,8 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.doagents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput
-from ...types.doagents.knowledge_base_detach_response import KnowledgeBaseDetachResponse
+from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput
+from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse

 __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"]

diff --git a/src/gradientai/resources/doagents/versions.py b/src/gradientai/resources/agents/versions.py
similarity index 98%
rename from src/gradientai/resources/doagents/versions.py
rename to src/gradientai/resources/agents/versions.py
index 6301bc0a..86dbf99f 100644
--- a/src/gradientai/resources/doagents/versions.py
+++ b/src/gradientai/resources/agents/versions.py
@@ -15,9 +15,9 @@
     async_to_streamed_response_wrapper,
 )
 from ..._base_client import make_request_options
-from ...types.doagents import version_list_params, version_update_params
-from ...types.doagents.version_list_response import VersionListResponse
-from ...types.doagents.version_update_response import VersionUpdateResponse
+from ...types.agents import version_list_params, version_update_params
+from ...types.agents.version_list_response import VersionListResponse
+from ...types.agents.version_update_response import VersionUpdateResponse

 __all__ = ["VersionsResource", "AsyncVersionsResource"]

diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 09d071f0..5ee961c6 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -5,34 +5,34 @@
 from .api_agent import APIAgent as APIAgent
 from .api_model import APIModel as APIModel
 from .api_indexing_job import APIIndexingJob as APIIndexingJob
+from .agent_list_params import AgentListParams as AgentListParams
 from .model_list_params import ModelListParams as ModelListParams
 from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
 from .region_list_params import RegionListParams as RegionListParams
-from .doagent_list_params import DoagentListParams as DoagentListParams
+from .agent_create_params import AgentCreateParams as AgentCreateParams
+from .agent_list_response import AgentListResponse as AgentListResponse
+from .agent_update_params import AgentUpdateParams as AgentUpdateParams
 from .model_list_response import ModelListResponse as ModelListResponse
 from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
 from .region_list_response import RegionListResponse as RegionListResponse
-from .doagent_create_params import DoagentCreateParams as DoagentCreateParams
-from .doagent_list_response import DoagentListResponse as DoagentListResponse
-from .doagent_update_params import DoagentUpdateParams as DoagentUpdateParams
+from .agent_create_response import AgentCreateResponse as AgentCreateResponse
+from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
+from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
 from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
+from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
-from .doagent_create_response import DoagentCreateResponse as DoagentCreateResponse
-from .doagent_delete_response import DoagentDeleteResponse as DoagentDeleteResponse
-from .doagent_update_response import DoagentUpdateResponse as DoagentUpdateResponse
 from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
 from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
-from .doagent_retrieve_response import DoagentRetrieveResponse as DoagentRetrieveResponse
+from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
 from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
 from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
 from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
-from .doagent_update_status_params import DoagentUpdateStatusParams as DoagentUpdateStatusParams
+from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse
 from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
 from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
 from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
 from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
-from .doagent_update_status_response import DoagentUpdateStatusResponse as DoagentUpdateStatusResponse
 from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
 from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
 from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse
diff --git a/src/gradientai/types/doagent_create_params.py b/src/gradientai/types/agent_create_params.py
similarity index 90%
rename from src/gradientai/types/doagent_create_params.py
rename to src/gradientai/types/agent_create_params.py
index b5b6e72d..58b99df7 100644
--- a/src/gradientai/types/doagent_create_params.py
+++ b/src/gradientai/types/agent_create_params.py
@@ -7,10 +7,10 @@
 
 from .._utils import PropertyInfo
 
-__all__ = ["DoagentCreateParams"]
+__all__ = ["AgentCreateParams"]
 
 
-class DoagentCreateParams(TypedDict, total=False):
+class AgentCreateParams(TypedDict, total=False):
     anthropic_key_uuid: str
 
     description: str
diff --git a/src/gradientai/types/doagent_update_response.py b/src/gradientai/types/agent_create_response.py
similarity index 77%
rename from src/gradientai/types/doagent_update_response.py
rename to src/gradientai/types/agent_create_response.py
index 4d48bee7..48545fe9 100644
--- a/src/gradientai/types/doagent_update_response.py
+++ b/src/gradientai/types/agent_create_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["DoagentUpdateResponse"]
+__all__ = ["AgentCreateResponse"]
 
 
-class DoagentUpdateResponse(BaseModel):
+class AgentCreateResponse(BaseModel):
     agent: Optional["APIAgent"] = None
diff --git a/src/gradientai/types/doagent_create_response.py b/src/gradientai/types/agent_delete_response.py
similarity index 77%
rename from src/gradientai/types/doagent_create_response.py
rename to src/gradientai/types/agent_delete_response.py
index 2d171436..eb1d440d 100644
--- a/src/gradientai/types/doagent_create_response.py
+++ b/src/gradientai/types/agent_delete_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["DoagentCreateResponse"]
+__all__ = ["AgentDeleteResponse"]
 
 
-class DoagentCreateResponse(BaseModel):
+class AgentDeleteResponse(BaseModel):
     agent: Optional["APIAgent"] = None
diff --git a/src/gradientai/types/doagent_list_params.py b/src/gradientai/types/agent_list_params.py
similarity index 79%
rename from src/gradientai/types/doagent_list_params.py
rename to src/gradientai/types/agent_list_params.py
index a9b3fb2b..e13a10c9 100644
--- a/src/gradientai/types/doagent_list_params.py
+++ b/src/gradientai/types/agent_list_params.py
@@ -4,10 +4,10 @@
 
 from typing_extensions import TypedDict
 
-__all__ = ["DoagentListParams"]
+__all__ = ["AgentListParams"]
 
 
-class DoagentListParams(TypedDict, total=False):
+class AgentListParams(TypedDict, total=False):
     only_deployed: bool
     """only list agents that are deployed."""
diff --git a/src/gradientai/types/doagent_list_response.py b/src/gradientai/types/agent_list_response.py
similarity index 98%
rename from src/gradientai/types/doagent_list_response.py
rename to src/gradientai/types/agent_list_response.py
index 65c2b076..6af9cd51 100644
--- a/src/gradientai/types/doagent_list_response.py
+++ b/src/gradientai/types/agent_list_response.py
@@ -5,14 +5,14 @@
 from typing_extensions import Literal
 
 from .._models import BaseModel
-from .doagents.api_meta import APIMeta
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
 from .api_knowledge_base import APIKnowledgeBase
-from .doagents.api_links import APILinks
 from .api_retrieval_method import APIRetrievalMethod
 from .api_deployment_visibility import APIDeploymentVisibility
 
 __all__ = [
-    "DoagentListResponse",
+    "AgentListResponse",
     "Agent",
     "AgentChatbot",
     "AgentChatbotIdentifier",
@@ -323,7 +323,7 @@ class Agent(BaseModel):
     uuid: Optional[str] = None
 
 
-class DoagentListResponse(BaseModel):
+class AgentListResponse(BaseModel):
     agents: Optional[List[Agent]] = None
 
     links: Optional[APILinks] = None
diff --git a/src/gradientai/types/doagent_delete_response.py b/src/gradientai/types/agent_retrieve_response.py
similarity index 77%
rename from src/gradientai/types/doagent_delete_response.py
rename to src/gradientai/types/agent_retrieve_response.py
index 5d90ba17..2eed88af 100644
--- a/src/gradientai/types/doagent_delete_response.py
+++ b/src/gradientai/types/agent_retrieve_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["DoagentDeleteResponse"]
+__all__ = ["AgentRetrieveResponse"]
 
 
-class DoagentDeleteResponse(BaseModel):
+class AgentRetrieveResponse(BaseModel):
     agent: Optional["APIAgent"] = None
diff --git a/src/gradientai/types/doagent_update_params.py b/src/gradientai/types/agent_update_params.py
similarity index 95%
rename from src/gradientai/types/doagent_update_params.py
rename to src/gradientai/types/agent_update_params.py
index a8598f5e..85f9a9c2 100644
--- a/src/gradientai/types/doagent_update_params.py
+++ b/src/gradientai/types/agent_update_params.py
@@ -8,10 +8,10 @@
 
 from .._utils import PropertyInfo
 from .api_retrieval_method import APIRetrievalMethod
 
-__all__ = ["DoagentUpdateParams"]
+__all__ = ["AgentUpdateParams"]
 
 
-class DoagentUpdateParams(TypedDict, total=False):
+class AgentUpdateParams(TypedDict, total=False):
     anthropic_key_uuid: str
 
     description: str
diff --git a/src/gradientai/types/doagent_retrieve_response.py b/src/gradientai/types/agent_update_response.py
similarity index 76%
rename from src/gradientai/types/doagent_retrieve_response.py
rename to src/gradientai/types/agent_update_response.py
index 9fb0a722..2948aa1c 100644
--- a/src/gradientai/types/doagent_retrieve_response.py
+++ b/src/gradientai/types/agent_update_response.py
@@ -6,10 +6,10 @@
 
 from .._models import BaseModel
 
-__all__ = ["DoagentRetrieveResponse"]
+__all__ = ["AgentUpdateResponse"]
 
 
-class DoagentRetrieveResponse(BaseModel):
+class AgentUpdateResponse(BaseModel):
     agent: Optional["APIAgent"] = None
diff --git a/src/gradientai/types/doagent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py
similarity index 79%
rename from src/gradientai/types/doagent_update_status_params.py
rename to src/gradientai/types/agent_update_status_params.py
index 3bd0c539..a0cdc0b9 100644
--- a/src/gradientai/types/doagent_update_status_params.py
+++ b/src/gradientai/types/agent_update_status_params.py
@@ -7,10 +7,10 @@
 
 from .._utils import PropertyInfo
 from .api_deployment_visibility import APIDeploymentVisibility
 
-__all__ = ["DoagentUpdateStatusParams"]
+__all__ = ["AgentUpdateStatusParams"]
 
 
-class DoagentUpdateStatusParams(TypedDict, total=False):
+class AgentUpdateStatusParams(TypedDict, total=False):
     body_uuid: Annotated[str, PropertyInfo(alias="uuid")]
 
     visibility: APIDeploymentVisibility
diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py
new file mode 100644
index 00000000..b200f99d
--- /dev/null
+++ b/src/gradientai/types/agent_update_status_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["AgentUpdateStatusResponse"]
+
+
+class AgentUpdateStatusResponse(BaseModel):
+    agent: Optional["APIAgent"] = None
+
+
+from .api_agent import APIAgent
diff --git a/src/gradientai/types/doagents/__init__.py b/src/gradientai/types/agents/__init__.py
similarity index 100%
rename from src/gradientai/types/doagents/__init__.py
rename to src/gradientai/types/agents/__init__.py
diff --git a/src/gradientai/types/doagents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_create_params.py
rename to src/gradientai/types/agents/api_key_create_params.py
diff --git a/src/gradientai/types/doagents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_create_response.py
rename to src/gradientai/types/agents/api_key_create_response.py
diff --git a/src/gradientai/types/doagents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_delete_response.py
rename to src/gradientai/types/agents/api_key_delete_response.py
diff --git a/src/gradientai/types/doagents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_list_params.py
rename to src/gradientai/types/agents/api_key_list_params.py
diff --git a/src/gradientai/types/doagents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_list_response.py
rename to src/gradientai/types/agents/api_key_list_response.py
diff --git a/src/gradientai/types/doagents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_regenerate_response.py
rename to src/gradientai/types/agents/api_key_regenerate_response.py
diff --git a/src/gradientai/types/doagents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_update_params.py
rename to src/gradientai/types/agents/api_key_update_params.py
diff --git a/src/gradientai/types/doagents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py
similarity index 100%
rename from src/gradientai/types/doagents/api_key_update_response.py
rename to src/gradientai/types/agents/api_key_update_response.py
diff --git a/src/gradientai/types/doagents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py
similarity index 100%
rename from src/gradientai/types/doagents/api_link_knowledge_base_output.py
rename to src/gradientai/types/agents/api_link_knowledge_base_output.py
diff --git a/src/gradientai/types/doagents/api_links.py b/src/gradientai/types/agents/api_links.py
similarity index 100%
rename from src/gradientai/types/doagents/api_links.py
rename to src/gradientai/types/agents/api_links.py
diff --git a/src/gradientai/types/doagents/api_meta.py b/src/gradientai/types/agents/api_meta.py
similarity index 100%
rename from src/gradientai/types/doagents/api_meta.py
rename to src/gradientai/types/agents/api_meta.py
diff --git a/src/gradientai/types/doagents/child_agent_add_params.py b/src/gradientai/types/agents/child_agent_add_params.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_add_params.py
rename to src/gradientai/types/agents/child_agent_add_params.py
diff --git a/src/gradientai/types/doagents/child_agent_add_response.py b/src/gradientai/types/agents/child_agent_add_response.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_add_response.py
rename to src/gradientai/types/agents/child_agent_add_response.py
diff --git a/src/gradientai/types/doagents/child_agent_delete_response.py b/src/gradientai/types/agents/child_agent_delete_response.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_delete_response.py
rename to src/gradientai/types/agents/child_agent_delete_response.py
diff --git a/src/gradientai/types/doagents/child_agent_update_params.py b/src/gradientai/types/agents/child_agent_update_params.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_update_params.py
rename to src/gradientai/types/agents/child_agent_update_params.py
diff --git a/src/gradientai/types/doagents/child_agent_update_response.py b/src/gradientai/types/agents/child_agent_update_response.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_update_response.py
rename to src/gradientai/types/agents/child_agent_update_response.py
diff --git a/src/gradientai/types/doagents/child_agent_view_response.py b/src/gradientai/types/agents/child_agent_view_response.py
similarity index 100%
rename from src/gradientai/types/doagents/child_agent_view_response.py
rename to src/gradientai/types/agents/child_agent_view_response.py
diff --git a/src/gradientai/types/doagents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py
similarity index 100%
rename from src/gradientai/types/doagents/function_create_params.py
rename to src/gradientai/types/agents/function_create_params.py
diff --git a/src/gradientai/types/doagents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py
similarity index 100%
rename from src/gradientai/types/doagents/function_create_response.py
rename to src/gradientai/types/agents/function_create_response.py
diff --git a/src/gradientai/types/doagents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py
similarity index 100%
rename from src/gradientai/types/doagents/function_delete_response.py
rename to src/gradientai/types/agents/function_delete_response.py
diff --git a/src/gradientai/types/doagents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py
similarity index 100%
rename from src/gradientai/types/doagents/function_update_params.py
rename to src/gradientai/types/agents/function_update_params.py
diff --git a/src/gradientai/types/doagents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py
similarity index 100%
rename from src/gradientai/types/doagents/function_update_response.py
rename to src/gradientai/types/agents/function_update_response.py
diff --git a/src/gradientai/types/doagents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py
similarity index 100%
rename from src/gradientai/types/doagents/knowledge_base_detach_response.py
rename to src/gradientai/types/agents/knowledge_base_detach_response.py
diff --git a/src/gradientai/types/doagents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py
similarity index 100%
rename from src/gradientai/types/doagents/version_list_params.py
rename to src/gradientai/types/agents/version_list_params.py
diff --git a/src/gradientai/types/doagents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py
similarity index 100%
rename from src/gradientai/types/doagents/version_list_response.py
rename to src/gradientai/types/agents/version_list_response.py
diff --git a/src/gradientai/types/doagents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py
similarity index 100%
rename from src/gradientai/types/doagents/version_update_params.py
rename to src/gradientai/types/agents/version_update_params.py
diff --git a/src/gradientai/types/doagents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py
similarity index 100%
rename from src/gradientai/types/doagents/version_update_response.py
rename to src/gradientai/types/agents/version_update_response.py
diff --git a/src/gradientai/types/doagent_update_status_response.py b/src/gradientai/types/doagent_update_status_response.py
deleted file mode 100644
index b31c1e99..00000000
--- a/src/gradientai/types/doagent_update_status_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["DoagentUpdateStatusResponse"]
-
-
-class DoagentUpdateStatusResponse(BaseModel):
-    agent: Optional["APIAgent"] = None
-
-
-from .api_agent import APIAgent
diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py
index dc94b966..1379cc55 100644
--- a/src/gradientai/types/indexing_job_list_response.py
+++ b/src/gradientai/types/indexing_job_list_response.py
@@ -3,9 +3,9 @@
 from typing import List, Optional
 
 from .._models import BaseModel
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
 from .api_indexing_job import APIIndexingJob
-from .doagents.api_meta import APIMeta
-from .doagents.api_links import APILinks
 
 __all__ = ["IndexingJobListResponse"]
diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py
index 9cbc4bd5..535e2f96 100644
--- a/src/gradientai/types/inference/api_key_list_response.py
+++ b/src/gradientai/types/inference/api_key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional
 
 from ..._models import BaseModel
-from ..doagents.api_meta import APIMeta
-from ..doagents.api_links import APILinks
+from ..agents.api_meta import APIMeta
+from ..agents.api_links import APILinks
 from .api_model_api_key_info import APIModelAPIKeyInfo
 
 __all__ = ["APIKeyListResponse"]
diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py
index 4fa7536d..09ca1ad3 100644
--- a/src/gradientai/types/knowledge_base_list_response.py
+++ b/src/gradientai/types/knowledge_base_list_response.py
@@ -3,9 +3,9 @@
 from typing import List, Optional
 
 from .._models import BaseModel
-from .doagents.api_meta import APIMeta
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
 from .api_knowledge_base import APIKnowledgeBase
-from .doagents.api_links import APILinks
 
 __all__ = ["KnowledgeBaseListResponse"]
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py
index d0c16c12..78246ce1 100644
--- a/src/gradientai/types/knowledge_bases/data_source_list_response.py
+++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional
 
 from ..._models import BaseModel
-from ..doagents.api_meta import APIMeta
-from ..doagents.api_links import APILinks
+from ..agents.api_meta import APIMeta
+from ..agents.api_links import APILinks
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
 
 __all__ = ["DataSourceListResponse"]
diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py
index 1eb8f907..e6f5fad5 100644
--- a/src/gradientai/types/model_list_response.py
+++ b/src/gradientai/types/model_list_response.py
@@ -4,8 +4,8 @@
 
 from .._models import BaseModel
 from .api_model import APIModel
-from .doagents.api_meta import APIMeta
-from .doagents.api_links import APILinks
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
 
 __all__ = ["ModelListResponse"]
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
index 174b5ea0..ba6ca946 100644
--- a/src/gradientai/types/providers/anthropic/key_list_agents_response.py
+++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py
@@ -5,8 +5,8 @@
 from typing import List, Optional
 
 from ...._models import BaseModel
-from ...doagents.api_meta import APIMeta
-from ...doagents.api_links import APILinks
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
 
 __all__ = ["KeyListAgentsResponse"]
diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/gradientai/types/providers/anthropic/key_list_response.py
index 7699e23b..d0b84e96 100644
--- a/src/gradientai/types/providers/anthropic/key_list_response.py
+++ b/src/gradientai/types/providers/anthropic/key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional
 
 from ...._models import BaseModel
-from ...doagents.api_meta import APIMeta
-from ...doagents.api_links import APILinks
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
 from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
 
 __all__ = ["KeyListResponse"]
diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py
index 68a74cd1..c263cba3 100644
--- a/src/gradientai/types/providers/openai/key_list_response.py
+++ b/src/gradientai/types/providers/openai/key_list_response.py
@@ -3,8 +3,8 @@
 from typing import List, Optional
 
 from ...._models import BaseModel
-from ...doagents.api_meta import APIMeta
-from ...doagents.api_links import APILinks
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
 from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
 
 __all__ = ["KeyListResponse"]
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
index 9393fe08..f42edea6 100644
--- a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
+++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py
@@ -5,8 +5,8 @@
 from typing import List, Optional
 
 from ...._models import BaseModel
-from ...doagents.api_meta import APIMeta
-from ...doagents.api_links import APILinks
+from ...agents.api_meta
import APIMeta +from ...agents.api_links import APILinks __all__ = ["KeyRetrieveAgentsResponse"] diff --git a/tests/api_resources/doagents/__init__.py b/tests/api_resources/agents/__init__.py similarity index 100% rename from tests/api_resources/doagents/__init__.py rename to tests/api_resources/agents/__init__.py diff --git a/tests/api_resources/doagents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py similarity index 84% rename from tests/api_resources/doagents/test_api_keys.py rename to tests/api_resources/agents/test_api_keys.py index dd654e83..e8489258 100644 --- a/tests/api_resources/doagents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,7 +26,7 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.create( + api_key = client.agents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -34,7 +34,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.create( + api_key = client.agents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.create( + response = client.agents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -56,7 +56,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.create( + with client.agents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.create( + client.agents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.update( + api_key = client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -87,7 +87,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.update( + api_key = client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -99,7 +99,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = 
client.doagents.api_keys.with_raw_response.update( + response = client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -112,7 +112,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.update( + with client.agents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -128,13 +128,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.update( + client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.update( + client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -142,7 +142,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.list( + api_key = client.agents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -150,7 +150,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.list( + api_key = client.agents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.list( + response = client.agents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.list( + with client.agents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -187,14 +187,14 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.list( + client.agents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.delete( + api_key = client.agents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.delete( + response = 
client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.delete( + with client.agents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -232,13 +232,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.delete( + client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.delete( + client.agents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -246,7 +246,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.regenerate( + api_key = client.agents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -255,7 +255,7 @@ def test_method_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_regenerate(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.regenerate( + response = client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.regenerate( + with client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -284,13 +284,13 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.regenerate( + client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.regenerate( + client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.create( + api_key = await async_client.agents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -310,7 +310,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = 
await async_client.doagents.api_keys.create( + api_key = await async_client.agents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -320,7 +320,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.create( + response = await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.create( + async with async_client.agents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -347,14 +347,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.create( + await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.update( + api_key = await async_client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.update( + api_key = await async_client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -375,7 +375,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.update( + response = await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.update( + async with async_client.agents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -404,13 +404,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.update( + await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.update( + await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -418,7 +418,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.list( + api_key = await async_client.agents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -426,7 +426,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.list( + api_key = await async_client.agents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -436,7 +436,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.list( + response = await async_client.agents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -448,7 +448,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.list( + async with async_client.agents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -463,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.list( + await async_client.agents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.delete( + api_key = await async_client.agents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -479,7 +479,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.delete( + response = await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -492,7 +492,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.delete( + async with async_client.agents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -508,13 +508,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> 
@parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.delete( + await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.delete( + await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -522,7 +522,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.regenerate( + api_key = await async_client.agents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -531,7 +531,7 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.regenerate( + response = await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -544,7 +544,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.regenerate( + async with async_client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -560,13 +560,13 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI @parametrize async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.regenerate( + await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.regenerate( + await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py similarity index 84% rename from tests/api_resources/doagents/test_child_agents.py rename to tests/api_resources/agents/test_child_agents.py index 8e0eb0a0..14af3b93 100644 --- a/tests/api_resources/doagents/test_child_agents.py +++ b/tests/api_resources/agents/test_child_agents.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( ChildAgentAddResponse, ChildAgentViewResponse, ChildAgentDeleteResponse, @@ -25,7 +25,7 @@ class TestChildAgents: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.update( + child_agent = 
client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -34,7 +34,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.update( + child_agent = client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -48,7 +48,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.update( + response = client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -61,7 +61,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.update( + with client.agents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.doagents.child_agents.with_raw_response.update( + client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.update( + client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -93,7 +93,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.delete( + child_agent = client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -102,7 +102,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.delete( + response = client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -115,7 +115,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.delete( + with client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.delete( + 
client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.delete( + client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,7 +145,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -154,7 +154,7 @@ def test_method_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -167,7 +167,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_add(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.add( + response = client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -180,7 +180,7 @@ def test_raw_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.add( + with client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.doagents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,7 +212,7 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.view( + child_agent = client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -220,7 +220,7 @@ def test_method_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.view( + response = client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -232,7 +232,7 @@ def test_raw_response_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.view( + 
with client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @parametrize def test_path_params_view(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.child_agents.with_raw_response.view( + client.agents.child_agents.with_raw_response.view( "", ) @@ -258,7 +258,7 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -267,7 +267,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -281,7 +281,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.update( + response = await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -294,7 +294,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.update( + async with async_client.agents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -312,13 +312,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.doagents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -326,7 +326,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.delete( + child_agent = await async_client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -335,7 +335,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() 
@parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.delete( + response = await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -348,7 +348,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.delete( + async with async_client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -364,13 +364,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -378,7 +378,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -387,7 +387,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -400,7 +400,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.add( + response = await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -413,7 +413,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.add( + async with async_client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -431,13 +431,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected 
a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.doagents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -445,7 +445,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_view(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.view( + child_agent = await async_client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -453,7 +453,7 @@ async def test_method_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.view( + response = await async_client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -465,7 +465,7 @@ async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.view( + async with async_client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -480,6 +480,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.view( + await async_client.agents.child_agents.with_raw_response.view( "", ) diff --git a/tests/api_resources/doagents/test_functions.py b/tests/api_resources/agents/test_functions.py similarity index 85% rename from tests/api_resources/doagents/test_functions.py rename to tests/api_resources/agents/test_functions.py index 11c76719..bfb05fa6 100644 --- a/tests/api_resources/doagents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, @@ -24,7 +24,7 @@ class TestFunctions: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - function = client.doagents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - function = client.doagents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -47,7 
+47,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.create( + response = client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.create( + with client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -74,14 +74,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.functions.with_raw_response.create( + client.agents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - function = client.doagents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -90,7 +90,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - function = client.doagents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -107,7 +107,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.update( + response = client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -120,7 +120,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.update( + with client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.doagents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -150,7 +150,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - function = client.doagents.functions.delete( + function = client.agents.functions.delete( function_uuid="function_uuid", 
agent_uuid="agent_uuid", ) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.delete( + response = client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.delete( + with client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -188,13 +188,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.doagents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) @@ -206,7 +206,7 @@ class TestAsyncFunctions: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -214,7 +214,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -229,7 +229,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.create( + response = await async_client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -241,7 +241,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.create( + async with async_client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -256,14 +256,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.create( + await async_client.agents.functions.with_raw_response.create( path_agent_uuid="", ) 
@pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -272,7 +272,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -289,7 +289,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.update( + response = await async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.update( + async with async_client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -318,13 +318,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.delete( + function = await async_client.agents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -341,7 +341,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.delete( + response = await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -354,7 +354,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.delete( + async with 
async_client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -370,13 +370,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py similarity index 82% rename from tests/api_resources/doagents/test_knowledge_bases.py rename to tests/api_resources/agents/test_knowledge_bases.py index f077caaa..dff80a9a 100644 --- a/tests/api_resources/doagents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -20,7 +20,7 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_attach(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.attach( + knowledge_base = client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -28,7 +28,7 @@ def test_method_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.attach( + response = client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -40,7 +40,7 @@ def test_raw_response_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.attach( + with client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -55,14 +55,14 @@ def test_streaming_response_attach(self, client: GradientAI) -> None: @parametrize def test_path_params_attach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach( + client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize def test_method_attach_single(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.attach_single( + knowledge_base = client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -71,7 +71,7 @@ def test_method_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def 
test_raw_response_attach_single(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.attach_single( + response = client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach_single(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.attach_single( + with client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -100,13 +100,13 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: @parametrize def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -114,7 +114,7 @@ def test_path_params_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.detach( + knowledge_base = client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -123,7 +123,7 @@ def test_method_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.detach( + response = client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -136,7 +136,7 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.detach( + with client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -152,13 +152,13 @@ def test_streaming_response_detach(self, client: GradientAI) -> None: @parametrize def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -170,7 +170,7 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_attach(self, async_client: AsyncGradientAI) -> None: - 
knowledge_base = await async_client.doagents.knowledge_bases.attach( + knowledge_base = await async_client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -178,7 +178,7 @@ async def test_method_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.attach( + response = await async_client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -190,7 +190,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.attach( + async with async_client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -205,14 +205,14 @@ async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.attach( + await async_client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.doagents.knowledge_bases.attach_single( + knowledge_base = await async_client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -221,7 +221,7 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.attach_single( + response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -234,7 +234,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.attach_single( + async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -250,13 +250,13 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien @parametrize async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.attach_single( + await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.attach_single( + await 
async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -264,7 +264,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.doagents.knowledge_bases.detach( + knowledge_base = await async_client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -273,7 +273,7 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.detach( + response = await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -286,7 +286,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.detach( + async with async_client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -302,13 +302,13 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_versions.py b/tests/api_resources/agents/test_versions.py similarity index 84% rename from tests/api_resources/doagents/test_versions.py rename to tests/api_resources/agents/test_versions.py index ec5e293d..77fee4c6 100644 --- a/tests/api_resources/doagents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) @@ -23,7 +23,7 @@ class TestVersions: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - version = client.doagents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -31,7 +31,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - version = client.doagents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -41,7 +41,7 @@ def 
test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.versions.with_raw_response.update( + response = client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -53,7 +53,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.versions.with_streaming_response.update( + with client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.versions.with_raw_response.update( + client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - version = client.doagents.versions.list( + version = client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -83,7 +83,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - version = client.doagents.versions.list( + version = client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -93,7 +93,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -105,7 +105,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.versions.with_streaming_response.list( + with client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -120,7 +120,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.versions.with_raw_response.list( + client.agents.versions.with_raw_response.list( uuid="", ) @@ -131,7 +131,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -139,7 +139,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -149,7 +149,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() 
@parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.versions.with_raw_response.update( + response = await async_client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -161,7 +161,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.versions.with_streaming_response.update( + async with async_client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -176,14 +176,14 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.versions.with_raw_response.update( + await async_client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -201,7 +201,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.versions.with_raw_response.list( + response = await async_client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -213,7 +213,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.versions.with_streaming_response.list( + async with async_client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -228,6 +228,6 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.versions.with_raw_response.list( + await async_client.agents.versions.with_raw_response.list( uuid="", ) diff --git a/tests/api_resources/test_doagents.py b/tests/api_resources/test_agents.py similarity index 66% rename from tests/api_resources/test_doagents.py rename to tests/api_resources/test_agents.py index 9a8c5c91..f39ac4d5 100644 --- a/tests/api_resources/test_doagents.py +++ b/tests/api_resources/test_agents.py @@ -10,30 +10,30 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type from gradientai.types import ( - DoagentListResponse, - DoagentCreateResponse, - DoagentDeleteResponse, - 
DoagentUpdateResponse, - DoagentRetrieveResponse, - DoagentUpdateStatusResponse, + AgentListResponse, + AgentCreateResponse, + AgentDeleteResponse, + AgentUpdateResponse, + AgentRetrieveResponse, + AgentUpdateStatusResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestDoagents: +class TestAgents: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - doagent = client.doagents.create() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.create( + agent = client.agents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -45,61 +45,61 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: region="region", tags=["string"], ) - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.create() + response = client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.create() as response: + with client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - doagent = client.doagents.retrieve( + agent = client.agents.retrieve( "uuid", ) - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.retrieve( + response = client.agents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.retrieve( + with client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -107,22 +107,22 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.with_raw_response.retrieve( + client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - doagent = client.doagents.update( + agent = client.agents.update( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.update( + agent = client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -140,31 +140,31 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: top_p=0, body_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.update( + response = client.agents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.update( + with client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -172,79 +172,79 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.with_raw_response.update( + client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - doagent = client.doagents.list() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.list( + agent = client.agents.list( only_deployed=True, page=0, per_page=0, ) - 
assert_matches_type(DoagentListResponse, doagent, path=["response"]) + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.list() + response = client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.list() as response: + with client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - doagent = client.doagents.delete( + agent = client.agents.delete( "uuid", ) - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.delete( + response = client.agents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.delete( + with client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -252,51 +252,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.with_raw_response.delete( + client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_status(self, client: GradientAI) -> None: - doagent = client.doagents.update_status( + agent = client.agents.update_status( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.update_status( + agent = client.agents.update_status( path_uuid="uuid", 
body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.update_status( + response = client.agents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.update_status( + with client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -304,24 +304,24 @@ def test_streaming_response_update_status(self, client: GradientAI) -> None: @parametrize def test_path_params_update_status(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.with_raw_response.update_status( + client.agents.with_raw_response.update_status( path_uuid="", ) -class TestAsyncDoagents: +class TestAsyncAgents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.create() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await async_client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.create( + agent = await async_client.agents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -333,61 +333,61 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI region="region", tags=["string"], ) - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.create() + response = await async_client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: 
AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.create() as response: + async with async_client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.retrieve( + agent = await async_client.agents.retrieve( "uuid", ) - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.retrieve( + response = await async_client.agents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.retrieve( + async with async_client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -395,22 +395,22 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.with_raw_response.retrieve( + await async_client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update( + agent = await async_client.agents.update( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update( + agent = await async_client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -428,31 +428,31 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI top_p=0, body_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def 
test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.update( + response = await async_client.agents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.update( + async with async_client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -460,79 +460,79 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.with_raw_response.update( + await async_client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.list() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await async_client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.list( + agent = await async_client.agents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.list() + response = await async_client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.list() as response: + async with async_client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async 
def test_method_delete(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.delete( + agent = await async_client.agents.delete( "uuid", ) - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.delete( + response = await async_client.agents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.delete( + async with async_client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -540,51 +540,51 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.with_raw_response.delete( + await async_client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.update_status( + response = await async_client.agents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.doagents.with_streaming_response.update_status( + async with async_client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -592,6 +592,6 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien @parametrize async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.with_raw_response.update_status( + await async_client.agents.with_raw_response.update_status( path_uuid="", ) diff --git a/tests/test_client.py b/tests/test_client.py index 4a26cbd0..d83082e3 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -724,7 +724,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @@ -734,7 +734,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -763,7 +763,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list(uuid="uuid") + response = client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -787,7 +787,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -812,7 +812,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) @@ -1544,7 +1544,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await 
async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @@ -1556,7 +1556,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1586,7 +1586,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list(uuid="uuid") + response = await client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1611,7 +1611,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -1637,7 +1637,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) From 845ed2e688db88f29d1a5b84d7b275d1f78a145d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 02:22:54 +0000 Subject: [PATCH 048/200] chore(internal): codegen related update --- .github/workflows/ci.yml | 6 +++--- .stats.yml | 2 +- scripts/utils/upload-artifact.sh | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08bd7a02..6bfd00b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -35,7 +35,7 @@ jobs: run: ./scripts/lint upload: - if: github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' + if: github.repository == 'stainless-sdks/gradientai-python' timeout-minutes: 10 name: upload permissions: @@ -61,7 +61,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 diff --git a/.stats.yml b/.stats.yml index 8f85d58c..c2144164 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index c1019559..eb717c71 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/digitalocean-genai-sdk-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/gradientai-python/$SHA'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 9833b555d01be4e8e7535bcae351d3276eb750d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 04:15:38 +0000 Subject: [PATCH 049/200] feat(client): add support for aiohttp --- README.md | 36 ++++++++++++++++ pyproject.toml | 2 + requirements-dev.lock | 27 ++++++++++++ requirements.lock | 27 ++++++++++++ src/gradientai/__init__.py | 3 +- src/gradientai/_base_client.py | 22 ++++++++++ tests/api_resources/agents/test_api_keys.py | 4 +- .../api_resources/agents/test_child_agents.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- tests/api_resources/inference/test_models.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../providers/anthropic/test_keys.py | 4 +- .../providers/openai/test_keys.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_indexing_jobs.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 43 ++++++++++++++++--- 23 files changed, 201 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 36edcfbd..efae1613 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,42 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. +### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. 
+ +You can enable this by installing `aiohttp`: + +```sh +# install from PyPI +pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python[aiohttp] +``` + +Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: + +```python +import os +import asyncio +from gradientai import DefaultAioHttpClient +from gradientai import AsyncGradientAI + + +async def main() -> None: + async with AsyncGradientAI( + api_key=os.environ.get( + "DIGITALOCEAN_GENAI_SDK_API_KEY" + ), # This is the default and can be omitted + http_client=DefaultAioHttpClient(), + ) as client: + versions = await client.agents.versions.list( + uuid="REPLACE_ME", + ) + print(versions.agent_versions) + + +asyncio.run(main()) +``` + ## Using types Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: diff --git a/pyproject.toml b/pyproject.toml index 8f36a952..22cad738 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,8 @@ classifiers = [ Homepage = "https://github.com/digitalocean/genai-python" Repository = "https://github.com/digitalocean/genai-python" +[project.optional-dependencies] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] [tool.rye] managed = true diff --git a/requirements-dev.lock b/requirements-dev.lock index 1e074a56..85b6a829 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -10,6 +10,13 @@ # universal: false -e file:. +aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 @@ -17,6 +24,10 @@ anyio==4.4.0 # via httpx argcomplete==3.1.2 # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -34,16 +45,23 @@ execnet==2.1.1 # via pytest-xdist filelock==3.12.4 # via virtualenv +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp # via respx +httpx-aiohttp==0.1.6 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio # via httpx + # via yarl importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest @@ -51,6 +69,9 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py +multidict==6.4.4 + # via aiohttp + # via yarl mypy==1.14.1 mypy-extensions==1.0.0 # via mypy @@ -65,6 +86,9 @@ platformdirs==3.11.0 # via virtualenv pluggy==1.5.0 # via pytest +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 @@ -98,11 +122,14 @@ tomli==2.0.2 typing-extensions==4.12.2 # via anyio # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via multidict # via mypy # via pydantic # via pydantic-core # via pyright virtualenv==20.24.5 # via nox +yarl==1.20.0 + # via aiohttp zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index dab2f6ce..47944bd5 100644 --- a/requirements.lock +++ b/requirements.lock @@ -10,11 +10,22 @@ # universal: false -e file:. 
+aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -22,15 +33,28 @@ distro==1.8.0 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python exceptiongroup==1.2.2 # via anyio +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +httpx-aiohttp==0.1.6 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio # via httpx + # via yarl +multidict==6.4.4 + # via aiohttp + # via yarl +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 @@ -41,5 +65,8 @@ sniffio==1.3.0 typing-extensions==4.12.2 # via anyio # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via multidict # via pydantic # via pydantic-core +yarl==1.20.0 + # via aiohttp diff --git a/src/gradientai/__init__.py b/src/gradientai/__init__.py index e0f0a60b..3316fe47 100644 --- a/src/gradientai/__init__.py +++ b/src/gradientai/__init__.py @@ -36,7 +36,7 @@ UnprocessableEntityError, APIResponseValidationError, ) -from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging __all__ = [ @@ -78,6 +78,7 @@ "DEFAULT_CONNECTION_LIMITS", "DefaultHttpxClient", "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", ] if not _t.TYPE_CHECKING: diff --git a/src/gradientai/_base_client.py b/src/gradientai/_base_client.py index aa3b35f1..6dce600b 100644 --- a/src/gradientai/_base_client.py +++ b/src/gradientai/_base_client.py @@ -1289,6 +1289,24 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + if TYPE_CHECKING: DefaultAsyncHttpxClient = httpx.AsyncClient """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK @@ -1297,8 +1315,12 @@ def __init__(self, **kwargs: Any) -> None: This is useful because overriding the `http_client` with your own instance of `httpx.AsyncClient` will result in httpx's defaults being used, not ours. 
""" + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" else: DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index e8489258..beb9666a 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -297,7 +297,9 @@ def test_path_params_regenerate(self, client: GradientAI) -> None: class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py index 14af3b93..daa7b10e 100644 --- a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/agents/test_child_agents.py @@ -253,7 +253,9 @@ def test_path_params_view(self, client: GradientAI) -> None: class TestAsyncChildAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index bfb05fa6..5a3693cb 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -201,7 +201,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncFunctions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index dff80a9a..e62c05ff 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -165,7 +165,9 @@ def test_path_params_detach(self, client: GradientAI) -> None: class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 77fee4c6..79f73672 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -126,7 +126,9 @@ def test_path_params_list(self, client: GradientAI) -> None: class TestAsyncVersions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) 
@pytest.mark.skip() @parametrize diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 17319d86..b4c09579 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -100,7 +100,9 @@ def test_streaming_response_create(self, client: GradientAI) -> None: class TestAsyncCompletions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index d84572c7..90bf95b9 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -234,7 +234,9 @@ def test_path_params_update_regenerate(self, client: GradientAI) -> None: class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py index 936801cb..569345ed 100644 --- a/tests/api_resources/inference/test_models.py +++ b/tests/api_resources/inference/test_models.py @@ -89,7 +89,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index ce9c390e..9c466e2f 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -197,7 +197,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncDataSources: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py index fab973bf..86ec19f4 100644 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -289,7 +289,9 @@ def test_path_params_list_agents(self, client: GradientAI) -> None: class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py index 1bb270b1..ce5cb4f5 100644 --- 
a/tests/api_resources/providers/openai/test_keys.py +++ b/tests/api_resources/providers/openai/test_keys.py @@ -289,7 +289,9 @@ def test_path_params_retrieve_agents(self, client: GradientAI) -> None: class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index f39ac4d5..2cc0e080 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -310,7 +310,9 @@ def test_path_params_update_status(self, client: GradientAI) -> None: class TestAsyncAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py index d44a75ae..6a50d9b5 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/test_indexing_jobs.py @@ -234,7 +234,9 @@ def test_path_params_update_cancel(self, client: GradientAI) -> None: class TestAsyncIndexingJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index c9171644..508820ce 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -273,7 +273,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 946b2eb9..5e119f71 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -58,7 +58,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 64c84612..8e25617f 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -56,7 +56,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncRegions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": 
"aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/conftest.py b/tests/conftest.py index 8432d29e..23079a7e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,10 +6,12 @@ import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator +import httpx import pytest from pytest_asyncio import is_async_test -from gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] @@ -27,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: for async_test in pytest_asyncio_tests: async_test.add_marker(session_scope_marker, append=False) + # We skip tests that use both the aiohttp client and respx_mock as respx_mock + # doesn't support custom transports. + for item in items: + if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames: + continue + + if not hasattr(item, "callspec"): + continue + + async_client_param = item.callspec.params.get("async_client") + if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp": + item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock")) + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -45,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]: @pytest.fixture(scope="session") async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI]: - strict = getattr(request, "param", True) - if not isinstance(strict, bool): - raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - - async with AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + param = getattr(request, "param", True) + + # defaults + strict = True + http_client: None | httpx.AsyncClient = None + + if isinstance(param, bool): + strict = param + elif is_dict(param): + strict = param.get("strict", True) + assert isinstance(strict, bool) + + http_client_type = param.get("http_client", "httpx") + if http_client_type == "aiohttp": + http_client = DefaultAioHttpClient() + else: + raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") + + async with AsyncGradientAI( + base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client + ) as client: yield client From 36a7c9f68a33a23e6b3aff72a3012dc111246269 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:29:45 +0000 Subject: [PATCH 050/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 8 ++++---- src/gradientai/_client.py | 12 ++++++------ tests/test_client.py | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.stats.yml b/.stats.yml index c2144164..17f19856 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 +config_hash: 48e21c88c078b1d478257b2da0c840b2 diff --git a/README.md b/README.md index efae1613..546252a1 100644 --- 
a/README.md +++ b/README.md @@ -29,7 +29,7 @@ from gradientai import GradientAI client = GradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted ) @@ -41,7 +41,7 @@ print(versions.agent_versions) While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `DIGITALOCEAN_GENAI_SDK_API_KEY="My API Key"` to your `.env` file +to add `DIGITALOCEAN_GRADIENTAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. ## Async usage @@ -55,7 +55,7 @@ from gradientai import AsyncGradientAI client = AsyncGradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted ) @@ -95,7 +95,7 @@ from gradientai import AsyncGradientAI async def main() -> None: async with AsyncGradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 0a5eb9a1..f83fb8a7 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -82,13 +82,13 @@ def __init__( ) -> None: """Construct a new synchronous GradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. """ if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key @@ -301,13 +301,13 @@ def __init__( ) -> None: """Construct a new async AsyncGradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. 
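In practical terms, the rename means client construction now reads the key as follows; a minimal sketch (the exported key value and the agent UUID are placeholders for illustration only):

```python
import os

from gradientai import GradientAI

# Placeholder for illustration; in practice the key comes from your shell
# environment or a .env file, not from source code.
os.environ.setdefault("DIGITALOCEAN_GRADIENTAI_API_KEY", "My API Key")

client = GradientAI()  # api_key inferred from DIGITALOCEAN_GRADIENTAI_API_KEY
versions = client.agents.versions.list(uuid="REPLACE_ME")
print(versions.agent_versions)
```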
""" if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key diff --git a/tests/test_client.py b/tests/test_client.py index d83082e3..f80be1ea 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -341,7 +341,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 @@ -1153,7 +1153,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 From b049b358fc0f71d4532550739f84bd361906c234 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:30:36 +0000 Subject: [PATCH 051/200] feat(api): update via SDK Studio --- .github/workflows/create-releases.yml | 38 +++++++++++++++++++ .github/workflows/publish-pypi.yml | 8 +--- .github/workflows/release-doctor.yml | 3 +- .stats.yml | 2 +- CONTRIBUTING.md | 4 +- README.md | 6 +-- bin/check-release-environment | 4 ++ pyproject.toml | 6 +-- src/gradientai/resources/agents/agents.py | 8 ++-- src/gradientai/resources/agents/api_keys.py | 8 ++-- .../resources/agents/child_agents.py | 8 ++-- src/gradientai/resources/agents/functions.py | 8 ++-- .../resources/agents/knowledge_bases.py | 8 ++-- src/gradientai/resources/agents/versions.py | 8 ++-- src/gradientai/resources/chat/chat.py | 8 ++-- src/gradientai/resources/chat/completions.py | 8 ++-- src/gradientai/resources/indexing_jobs.py | 8 ++-- .../resources/inference/api_keys.py | 8 ++-- .../resources/inference/inference.py | 8 ++-- src/gradientai/resources/inference/models.py | 8 ++-- .../resources/knowledge_bases/data_sources.py | 8 ++-- .../knowledge_bases/knowledge_bases.py | 8 ++-- src/gradientai/resources/models.py | 8 ++-- .../providers/anthropic/anthropic.py | 8 ++-- .../resources/providers/anthropic/keys.py | 8 ++-- .../resources/providers/openai/keys.py | 8 ++-- .../resources/providers/openai/openai.py | 8 ++-- .../resources/providers/providers.py | 8 ++-- src/gradientai/resources/regions.py | 8 ++-- 29 files changed, 139 insertions(+), 100 deletions(-) create mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 00000000..04dac49f --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,38 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - main + +jobs: + release: + name: release + if: github.ref == 'refs/heads/main' && github.repository 
== 'digitalocean/gradientai-python' + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + + - name: Install Rye + if: ${{ steps.release.outputs.releases_created }} + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 34110cd4..bff3a970 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to PyPI in case it failed for some reason. -# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +# workflow for re-running publishing to PyPI in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 9845ae8d..94e02117 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -9,7 +9,7 @@ jobs: release_doctor: name: release doctor runs-on: ubuntu-latest - if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v4 @@ -18,4 +18,5 @@ jobs: run: | bash ./bin/check-release-environment env: + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.stats.yml b/.stats.yml index 17f19856..9e73986b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 48e21c88c078b1d478257b2da0c840b2 +config_hash: bae6be3845572f2dadf83c0aad336142 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe7e0d7c..086907ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/genai-python.git +$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to 
package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index 546252a1..3bba3198 100644 --- a/README.md +++ b/README.md @@ -271,9 +271,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -379,7 +379,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment index b1bd8969..78967e8b 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,6 +2,10 @@ errors=() +if [ -z "${STAINLESS_API_KEY}" ]; then + errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") +fi + if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/pyproject.toml b/pyproject.toml index 22cad738..1c89346a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/genai-python" -Repository = "https://github.com/digitalocean/genai-python" +Homepage = "https://github.com/digitalocean/gradientai-python" +Repository = "https://github.com/digitalocean/gradientai-python" [project.optional-dependencies] aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] @@ -124,7 +124,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 78439d33..63f0c4d4 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -104,7 +104,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -472,7 +472,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -481,7 +481,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 155e3adc..1cf2278e 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -278,7 +278,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -287,7 +287,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py index 9031d8ce..ad30f106 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChildAgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChildAgentsResourceWithStreamingResponse(self) @@ -245,7 +245,7 @@ def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChildAgentsResourceWithRawResponse(self) @@ -254,7 +254,7 @@ def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChildAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 67a811cc..8c5f3f49 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -205,7 +205,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -214,7 +214,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 3b9b0cd2..a5486c34 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -166,7 +166,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -175,7 +175,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index 86dbf99f..65a35472 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -147,7 +147,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -156,7 +156,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py index ac19d849..6fa2925d 100644 --- a/src/gradientai/resources/chat/chat.py +++ b/src/gradientai/resources/chat/chat.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index 62ab8f0d..2d7c94c3 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -193,7 +193,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -202,7 +202,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py index fcbcf43d..71c59023 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -34,7 +34,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -43,7 +43,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -260,7 +260,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -269,7 +269,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py index c00212f8..6759d09c 100644 --- a/src/gradientai/resources/inference/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -252,7 +252,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -261,7 +261,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py index 325353dc..209d6f17 100644 --- a/src/gradientai/resources/inference/inference.py +++ b/src/gradientai/resources/inference/inference.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> InferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return InferenceResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return InferenceResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncInferenceResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncInferenceResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py index da327695..42e1dcb2 100644 --- a/src/gradientai/resources/inference/models.py +++ b/src/gradientai/resources/inference/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -106,7 +106,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -115,7 +115,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index b549b3dc..bcd48b74 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -202,7 +202,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -211,7 +211,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index cf0cd8d8..2cab4f7b 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -322,7 +322,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -331,7 +331,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index 2c7b40ab..c8e78b9b 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -128,7 +128,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -137,7 +137,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/gradientai/resources/providers/anthropic/anthropic.py index 64783563..23a914e9 100644 --- a/src/gradientai/resources/providers/anthropic/anthropic.py +++ b/src/gradientai/resources/providers/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py index 9c1f6391..d1a33290 100644 --- a/src/gradientai/resources/providers/anthropic/keys.py +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -315,7 +315,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -324,7 +324,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index 9bfaba8e..01cfee75 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -313,7 +313,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -322,7 +322,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py index d29fd062..b02dc2e1 100644 --- a/src/gradientai/resources/providers/openai/openai.py +++ b/src/gradientai/resources/providers/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py index 50e3db1a..ef942f73 100644 --- a/src/gradientai/resources/providers/providers.py +++ b/src/gradientai/resources/providers/providers.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index 43c2038b..4c50d9e6 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return RegionsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return RegionsResourceWithStreamingResponse(self) @@ -97,7 +97,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncRegionsResourceWithRawResponse(self) @@ -106,7 +106,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncRegionsResourceWithStreamingResponse(self) From 4a39381883a773df0535f25030c10fb65167d3a0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:31:27 +0000 Subject: [PATCH 052/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 14 ++++---------- src/gradientai/_client.py | 12 ++++++------ tests/test_client.py | 4 ++-- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9e73986b..34b3d279 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: bae6be3845572f2dadf83c0aad336142 +config_hash: a17cf79d9650def96874dbd8e2416faf diff --git a/README.md b/README.md index 3bba3198..bd72811f 100644 --- a/README.md +++ b/README.md @@ -28,9 +28,7 @@ import os from gradientai import GradientAI client = GradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) versions = client.agents.versions.list( @@ -41,7 +39,7 @@ print(versions.agent_versions) While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `DIGITALOCEAN_GRADIENTAI_API_KEY="My API Key"` to your `.env` file +to add `GRADIENTAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. 
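A short sketch of the dotenv flow the README recommends, assuming `python-dotenv` is installed; the `.env` file and its contents are not part of this patch:

```python
from dotenv import load_dotenv

from gradientai import GradientAI

load_dotenv()  # reads .env, e.g. GRADIENTAI_API_KEY="My API Key"
client = GradientAI()  # api_key inferred from the environment
```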
## Async usage @@ -54,9 +52,7 @@ import asyncio from gradientai import AsyncGradientAI client = AsyncGradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) @@ -94,9 +90,7 @@ from gradientai import AsyncGradientAI async def main() -> None: async with AsyncGradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: versions = await client.agents.versions.list( diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index f83fb8a7..8710fe68 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -82,13 +82,13 @@ def __init__( ) -> None: """Construct a new synchronous GradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. """ if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key @@ -301,13 +301,13 @@ def __init__( ) -> None: """Construct a new async AsyncGradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. 
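A hedged sketch of the failure path this docstring implies, assuming `GradientAIError` is re-exported at the package top level, as is conventional for Stainless-generated SDKs:

```python
import os

from gradientai import GradientAI, GradientAIError  # top-level export assumed

os.environ.pop("GRADIENTAI_API_KEY", None)
try:
    GradientAI(api_key=None)
except GradientAIError as exc:
    print(exc)  # "The api_key client option must be set ..."
```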
""" if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key diff --git a/tests/test_client.py b/tests/test_client.py index f80be1ea..f19a5edb 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -341,7 +341,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 @@ -1153,7 +1153,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 From 099834ee13904ea052473757f0d17b169f4cddd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 24 Jun 2025 02:22:04 +0000 Subject: [PATCH 053/200] chore(internal): codegen related update --- .github/workflows/create-releases.yml | 38 --------------------------- .github/workflows/publish-pypi.yml | 8 ++++-- .github/workflows/release-doctor.yml | 1 - bin/check-release-environment | 4 --- 4 files changed, 6 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index 04dac49f..00000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - main - -jobs: - release: - name: release - if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Install Rye - if: ${{ steps.release.outputs.releases_created }} - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index bff3a970..3dcd6c42 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to PyPI in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml +# This 
workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 94e02117..d49e26c2 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -18,5 +18,4 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/bin/check-release-environment b/bin/check-release-environment index 78967e8b..b1bd8969 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi From 45d70253e657192dff63069e9cc039992d8a50ed Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 24 Jun 2025 04:40:04 +0000 Subject: [PATCH 054/200] chore(tests): skip some failing tests on the latest python versions --- tests/test_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_client.py b/tests/test_client.py index f19a5edb..11ebd21b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -191,6 +191,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1003,6 +1004,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") From 2a744349d5f7432815680f8a6991341336e8ab4d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:13:24 +0000 Subject: [PATCH 055/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- tests/test_client.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 34b3d279..742d7130 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a17cf79d9650def96874dbd8e2416faf +config_hash: 6082607b38b030ffbcb6f681788d1a88 diff --git a/tests/test_client.py b/tests/test_client.py index 
11ebd21b..f19a5edb 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -191,7 +191,6 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1004,7 +1003,6 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") From cd52d8c31d447977db83c629592838dc9c89a925 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:15:44 +0000 Subject: [PATCH 056/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 33 +++- src/gradientai/_client.py | 2 +- .../resources/knowledge_bases/data_sources.py | 5 +- src/gradientai/resources/regions/__init__.py | 19 +++ .../resources/{ => regions}/regions.py | 16 +- src/gradientai/types/__init__.py | 5 + src/gradientai/types/agent_list_response.py | 139 +---------------- src/gradientai/types/api_agent.py | 144 +----------------- src/gradientai/types/api_agent_model.py | 57 +++++++ src/gradientai/types/api_agreement.py | 17 +++ src/gradientai/types/api_evaluation_metric.py | 24 +++ src/gradientai/types/api_model.py | 26 +--- src/gradientai/types/api_model_version.py | 15 ++ .../types/api_openai_api_key_info.py | 70 +-------- src/gradientai/types/api_workspace.py | 36 +++++ src/gradientai/types/chat/__init__.py | 1 + .../chat/chat_completion_token_logprob.py | 57 +++++++ .../types/chat/completion_create_response.py | 117 +------------- .../types/knowledge_base_create_params.py | 17 +-- .../types/knowledge_bases/__init__.py | 1 + .../knowledge_bases/aws_data_source_param.py | 19 +++ .../data_source_create_params.py | 17 +-- src/gradientai/types/regions/__init__.py | 6 + .../types/regions/api_evaluation_test_case.py | 46 ++++++ .../types/regions/api_star_metric.py | 19 +++ .../types/regions/evaluation_runs/__init__.py | 3 + tests/api_resources/regions/__init__.py | 1 + 28 files changed, 396 insertions(+), 518 deletions(-) create mode 100644 src/gradientai/resources/regions/__init__.py rename src/gradientai/resources/{ => regions}/regions.py (94%) create mode 100644 src/gradientai/types/api_agent_model.py create mode 100644 src/gradientai/types/api_agreement.py create mode 100644 src/gradientai/types/api_evaluation_metric.py create mode 100644 src/gradientai/types/api_model_version.py create mode 100644 src/gradientai/types/api_workspace.py create mode 100644 src/gradientai/types/chat/chat_completion_token_logprob.py create mode 100644 src/gradientai/types/knowledge_bases/aws_data_source_param.py create mode 100644 src/gradientai/types/regions/__init__.py create mode 100644 src/gradientai/types/regions/api_evaluation_test_case.py create mode 100644 src/gradientai/types/regions/api_star_metric.py create mode 100644 src/gradientai/types/regions/evaluation_runs/__init__.py create mode 100644 tests/api_resources/regions/__init__.py diff --git a/.stats.yml b/.stats.yml index 742d7130..611b679c 100644 --- a/.stats.yml +++ 
b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 6082607b38b030ffbcb6f681788d1a88 +config_hash: ae932f39d93e617d3f513271503efbcf diff --git a/api.md b/api.md index 2376a11f..d644b609 100644 --- a/api.md +++ b/api.md @@ -6,10 +6,12 @@ Types: from gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, + APIAgentModel, APIAnthropicAPIKeyInfo, APIDeploymentVisibility, APIOpenAIAPIKeyInfo, APIRetrievalMethod, + APIWorkspace, AgentCreateResponse, AgentRetrieveResponse, AgentUpdateResponse, @@ -174,12 +176,34 @@ Methods: Types: ```python -from gradientai.types import RegionListResponse +from gradientai.types import APIEvaluationMetric, RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse + +## EvaluationRuns + +### Results + +Types: + +```python +from gradientai.types.regions.evaluation_runs import ( + APIEvaluationMetricResult, + APIEvaluationRun, + APIPrompt, +) +``` + +## EvaluationTestCases + +Types: + +```python +from gradientai.types.regions import APIEvaluationTestCase, APIStarMetric +``` # IndexingJobs @@ -237,6 +261,7 @@ from gradientai.types.knowledge_bases import ( APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, + AwsDataSource, DataSourceCreateResponse, DataSourceListResponse, DataSourceDeleteResponse, @@ -256,7 +281,7 @@ Methods: Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse ``` Methods: @@ -306,7 +331,7 @@ Methods: Types: ```python -from gradientai.types import APIModel, ModelListResponse +from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 8710fe68..71db35bc 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -33,10 +33,10 @@ if TYPE_CHECKING: from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource - from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index bcd48b74..e05696b9 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -19,6 +19,7 @@ data_source_list_params, data_source_create_params, ) +from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse 
from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse @@ -52,7 +53,7 @@ def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, @@ -219,7 +220,7 @@ async def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, diff --git a/src/gradientai/resources/regions/__init__.py b/src/gradientai/resources/regions/__init__.py new file mode 100644 index 00000000..fb9cf834 --- /dev/null +++ b/src/gradientai/resources/regions/__init__.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .regions import ( + RegionsResource, + AsyncRegionsResource, + RegionsResourceWithRawResponse, + AsyncRegionsResourceWithRawResponse, + RegionsResourceWithStreamingResponse, + AsyncRegionsResourceWithStreamingResponse, +) + +__all__ = [ + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions/regions.py similarity index 94% rename from src/gradientai/resources/regions.py rename to src/gradientai/resources/regions/regions.py index 4c50d9e6..6662e80a 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions/regions.py @@ -4,19 +4,19 @@ import httpx -from ..types import region_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ...types import region_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.region_list_response import RegionListResponse +from ..._base_client import make_request_options +from ...types.region_list_response import RegionListResponse __all__ = ["RegionsResource", "AsyncRegionsResource"] diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 5ee961c6..22414733 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -4,8 +4,12 @@ from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel +from .api_agreement import APIAgreement as APIAgreement +from .api_workspace import APIWorkspace 
as APIWorkspace +from .api_agent_model import APIAgentModel as APIAgentModel from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams +from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams @@ -18,6 +22,7 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 6af9cd51..97c0f0d5 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -6,6 +6,7 @@ from .._models import BaseModel from .agents.api_meta import APIMeta +from .api_agent_model import APIAgentModel from .agents.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod @@ -17,14 +18,8 @@ "AgentChatbot", "AgentChatbotIdentifier", "AgentDeployment", - "AgentModel", - "AgentModelAgreement", - "AgentModelVersion", "AgentTemplate", "AgentTemplateGuardrail", - "AgentTemplateModel", - "AgentTemplateModelAgreement", - "AgentTemplateModelVersion", ] @@ -74,140 +69,12 @@ class AgentDeployment(BaseModel): visibility: Optional[APIDeploymentVisibility] = None -class AgentModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class AgentModel(BaseModel): - agreement: Optional[AgentModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[AgentModelVersion] = None - - class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: Optional[str] = None -class AgentTemplateModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentTemplateModelVersion(BaseModel): - major: Optional[int] = None - - 
minor: Optional[int] = None - - patch: Optional[int] = None - - -class AgentTemplateModel(BaseModel): - agreement: Optional[AgentTemplateModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[AgentTemplateModelVersion] = None - - class AgentTemplate(BaseModel): created_at: Optional[datetime] = None @@ -225,7 +92,7 @@ class AgentTemplate(BaseModel): max_tokens: Optional[int] = None - model: Optional[AgentTemplateModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -276,7 +143,7 @@ class Agent(BaseModel): response. """ - model: Optional[AgentModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 3eb01fc7..1378950a 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -7,6 +7,7 @@ from typing_extensions import Literal from .._models import BaseModel +from .api_agent_model import APIAgentModel from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod from .api_agent_api_key_info import APIAgentAPIKeyInfo @@ -22,14 +23,8 @@ "Deployment", "Function", "Guardrail", - "Model", - "ModelAgreement", - "ModelVersion", "Template", "TemplateGuardrail", - "TemplateModel", - "TemplateModelAgreement", - "TemplateModelVersion", ] @@ -144,140 +139,12 @@ class Guardrail(BaseModel): uuid: Optional[str] = None -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None - - class TemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: 
Optional[str] = None -class TemplateModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class TemplateModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class TemplateModel(BaseModel): - agreement: Optional[TemplateModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[TemplateModelVersion] = None - - class Template(BaseModel): created_at: Optional[datetime] = None @@ -295,7 +162,7 @@ class Template(BaseModel): max_tokens: Optional[int] = None - model: Optional[TemplateModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -355,7 +222,7 @@ class APIAgent(BaseModel): max_tokens: Optional[int] = None - model: Optional[Model] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -395,4 +262,7 @@ class APIAgent(BaseModel): uuid: Optional[str] = None - workspace: Optional[object] = None + workspace: Optional["APIWorkspace"] = None + + +from .api_workspace import APIWorkspace diff --git a/src/gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py new file mode 100644 index 00000000..1025321b --- /dev/null +++ b/src/gradientai/types/api_agent_model.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIAgentModel"] + + +class APIAgentModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py new file mode 100644 index 00000000..c4359f1f --- /dev/null +++ b/src/gradientai/types/api_agreement.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIAgreement"] + + +class APIAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_evaluation_metric.py b/src/gradientai/types/api_evaluation_metric.py new file mode 100644 index 00000000..05390297 --- /dev/null +++ b/src/gradientai/types/api_evaluation_metric.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
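
The new `api_agent_model.py` above centralizes the model schema that was previously duplicated inside each resource module. As a rough illustration (not part of the patch), and assuming the SDK's `BaseModel` accepts keyword construction like the pydantic models it is built on:

```python
# Hypothetical sketch: constructing the shared APIAgentModel from an
# API-style payload. All fields are Optional, so any subset may be passed.
from gradientai.types.api_agent_model import APIAgentModel

model = APIAgentModel(
    name="example-model",  # placeholder values throughout
    is_foundational=True,
    provider="MODEL_PROVIDER_DIGITALOCEAN",
    usecases=["MODEL_USECASE_AGENT"],
)
print(model.name, model.provider)
```
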
+ +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["APIEvaluationMetric"] + + +class APIEvaluationMetric(BaseModel): + description: Optional[str] = None + + metric_name: Optional[str] = None + + metric_type: Optional[ + Literal["METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL"] + ] = None + + metric_uuid: Optional[str] = None + + metric_value_type: Optional[ + Literal["METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING"] + ] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index ac6f9c55..c2bc1edd 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -4,30 +4,14 @@ from datetime import datetime from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion -__all__ = ["APIModel", "Agreement", "Version"] - - -class Agreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class Version(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None +__all__ = ["APIModel"] class APIModel(BaseModel): - agreement: Optional[Agreement] = None + agreement: Optional[APIAgreement] = None created_at: Optional[datetime] = None @@ -45,4 +29,4 @@ class APIModel(BaseModel): uuid: Optional[str] = None - version: Optional[Version] = None + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py new file mode 100644 index 00000000..2e118632 --- /dev/null +++ b/src/gradientai/types/api_model_version.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
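
The `metric_type` and `metric_value_type` literals above determine how a metric's value should be interpreted. A minimal sketch of consuming them (an illustrative helper, not SDK code):

```python
# Illustrative only: keep metrics whose values are reported as numbers,
# using the Literal fields defined on APIEvaluationMetric above.
from typing import List

from gradientai.types.api_evaluation_metric import APIEvaluationMetric


def numeric_metrics(metrics: List[APIEvaluationMetric]) -> List[APIEvaluationMetric]:
    """Filter out string-valued and unspecified metrics."""
    return [m for m in metrics if m.metric_value_type == "METRIC_VALUE_TYPE_NUMBER"]
```
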
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIModelVersion"] + + +class APIModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index 0f57136d..7467cfc2 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -2,75 +2,11 @@ from typing import List, Optional from datetime import datetime -from typing_extensions import Literal from .._models import BaseModel +from .api_agent_model import APIAgentModel -__all__ = ["APIOpenAIAPIKeyInfo", "Model", "ModelAgreement", "ModelVersion"] - - -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None +__all__ = ["APIOpenAIAPIKeyInfo"] class APIOpenAIAPIKeyInfo(BaseModel): @@ -80,7 +16,7 @@ class APIOpenAIAPIKeyInfo(BaseModel): deleted_at: Optional[datetime] = None - models: Optional[List[Model]] = None + models: Optional[List[APIAgentModel]] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py new file mode 100644 index 00000000..b170d504 --- /dev/null +++ b/src/gradientai/types/api_workspace.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
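
`APIModelVersion` above carries the usual major/minor/patch triple, each field optional. A small hypothetical helper for rendering it:

```python
# Sketch, not SDK code: render APIModelVersion as a semantic-version string,
# treating missing components as zero.
from gradientai.types.api_model_version import APIModelVersion


def format_version(v: APIModelVersion) -> str:
    return f"{v.major or 0}.{v.minor or 0}.{v.patch or 0}"


print(format_version(APIModelVersion(major=1, minor=2)))  # -> "1.2.0"
```
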
+ +from __future__ import annotations + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .regions.api_evaluation_test_case import APIEvaluationTestCase + +__all__ = ["APIWorkspace"] + + +class APIWorkspace(BaseModel): + agents: Optional[List["APIAgent"]] = None + + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + created_by_email: Optional[str] = None + + deleted_at: Optional[datetime] = None + + description: Optional[str] = None + + evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py index 9384ac14..59553f68 100644 --- a/src/gradientai/types/chat/__init__.py +++ b/src/gradientai/types/chat/__init__.py @@ -4,3 +4,4 @@ from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/chat/chat_completion_token_logprob.py b/src/gradientai/types/chat/chat_completion_token_logprob.py new file mode 100644 index 00000000..c69e2589 --- /dev/null +++ b/src/gradientai/types/chat/chat_completion_token_logprob.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] + + +class TopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class ChatCompletionTokenLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[TopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. 
+ """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 5a25ac7c..1ac59a28 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,125 +4,16 @@ from typing_extensions import Literal from ..._models import BaseModel +from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = [ - "CompletionCreateResponse", - "Choice", - "ChoiceLogprobs", - "ChoiceLogprobsContent", - "ChoiceLogprobsContentTopLogprob", - "ChoiceLogprobsRefusal", - "ChoiceLogprobsRefusalTopLogprob", - "ChoiceMessage", - "Usage", -] - - -class ChoiceLogprobsContentTopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChoiceLogprobsContent(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - top_logprobs: List[ChoiceLogprobsContentTopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. - """ - - -class ChoiceLogprobsRefusalTopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChoiceLogprobsRefusal(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. 
- """ - - top_logprobs: List[ChoiceLogprobsRefusalTopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. - """ +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] class ChoiceLogprobs(BaseModel): - content: Optional[List[ChoiceLogprobsContent]] = None + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" - refusal: Optional[List[ChoiceLogprobsRefusal]] = None + refusal: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message refusal tokens with log probability information.""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index 2552bcf6..acf52e30 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -5,11 +5,12 @@ from typing import List, Iterable from typing_extensions import TypedDict +from .knowledge_bases.aws_data_source_param import AwsDataSourceParam from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceAwsDataSource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -49,20 +50,8 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): vpc_uuid: str -class DatasourceAwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str - - class Datasource(TypedDict, total=False): - aws_data_source: DatasourceAwsDataSource + aws_data_source: AwsDataSourceParam bucket_name: str diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index f5f31034..859c3618 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py new file mode 100644 index 00000000..93d49228 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/aws_data_source_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
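
With the duplicated logprob shapes collapsed into the shared `ChatCompletionTokenLogprob`, the `content` and `refusal` lists can now be processed by the same code. A hedged sketch, assuming a `CompletionCreateResponse` on which logprobs were requested:

```python
# Sum token log probabilities from the first choice of a chat completion
# response. Both logprobs and its content list are Optional per the types above.
def total_logprob(response) -> float:
    choice = response.choices[0]
    if choice.logprobs is None or choice.logprobs.content is None:
        return 0.0
    return sum(t.logprob for t in choice.logprobs.content)
```
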
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AwsDataSourceParam"] + + +class AwsDataSourceParam(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py index b1abafdf..22bd76e7 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -5,29 +5,18 @@ from typing_extensions import Annotated, TypedDict from ..._utils import PropertyInfo +from .aws_data_source_param import AwsDataSourceParam from .api_spaces_data_source_param import APISpacesDataSourceParam from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["DataSourceCreateParams", "AwsDataSource"] +__all__ = ["DataSourceCreateParams"] class DataSourceCreateParams(TypedDict, total=False): - aws_data_source: AwsDataSource + aws_data_source: AwsDataSourceParam body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] spaces_data_source: APISpacesDataSourceParam web_crawler_data_source: APIWebCrawlerDataSourceParam - - -class AwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str diff --git a/src/gradientai/types/regions/__init__.py b/src/gradientai/types/regions/__init__.py new file mode 100644 index 00000000..83b21099 --- /dev/null +++ b/src/gradientai/types/regions/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .api_star_metric import APIStarMetric as APIStarMetric +from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase diff --git a/src/gradientai/types/regions/api_evaluation_test_case.py b/src/gradientai/types/regions/api_evaluation_test_case.py new file mode 100644 index 00000000..d799b0e0 --- /dev/null +++ b/src/gradientai/types/regions/api_evaluation_test_case.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
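
The shared `AwsDataSourceParam` TypedDict above replaces the per-endpoint `AwsDataSource`/`DatasourceAwsDataSource` duplicates. Since it is declared `total=False`, every key is optional at type-check time; a sketch with placeholder values:

```python
# Building the shared AWS data source param; all values are placeholders.
from gradientai.types.knowledge_bases.aws_data_source_param import AwsDataSourceParam

aws_source: AwsDataSourceParam = {
    "bucket_name": "my-bucket",   # placeholder
    "region": "us-east-1",        # placeholder
    "item_path": "docs/",         # placeholder
}
```
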
+ +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel +from .api_star_metric import APIStarMetric +from ..api_evaluation_metric import APIEvaluationMetric + +__all__ = ["APIEvaluationTestCase"] + + +class APIEvaluationTestCase(BaseModel): + archived_at: Optional[datetime] = None + + created_at: Optional[datetime] = None + + created_by_user_email: Optional[str] = None + + created_by_user_id: Optional[str] = None + + dataset_name: Optional[str] = None + + dataset_uuid: Optional[str] = None + + description: Optional[str] = None + + latest_version_number_of_runs: Optional[int] = None + + metrics: Optional[List[APIEvaluationMetric]] = None + + name: Optional[str] = None + + star_metric: Optional[APIStarMetric] = None + + test_case_uuid: Optional[str] = None + + total_runs: Optional[int] = None + + updated_at: Optional[datetime] = None + + updated_by_user_email: Optional[str] = None + + updated_by_user_id: Optional[str] = None + + version: Optional[int] = None diff --git a/src/gradientai/types/regions/api_star_metric.py b/src/gradientai/types/regions/api_star_metric.py new file mode 100644 index 00000000..c9ecc60a --- /dev/null +++ b/src/gradientai/types/regions/api_star_metric.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APIStarMetric"] + + +class APIStarMetric(BaseModel): + metric_uuid: Optional[str] = None + + name: Optional[str] = None + + success_threshold_pct: Optional[int] = None + """ + The success threshold for the star metric. This is a percentage value between 0 + and 100. + """ diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/gradientai/types/regions/evaluation_runs/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/regions/__init__.py b/tests/api_resources/regions/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/regions/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
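
For reference, a hypothetical check built on `APIStarMetric` as defined in this patch; `success_threshold_pct` is documented above as a percentage between 0 and 100:

```python
# Illustrative helper, not SDK code: compare a run's score against the
# star metric threshold, passing trivially when no threshold is set.
from gradientai.types.regions.api_star_metric import APIStarMetric


def passes_star_metric(score_pct: float, star: APIStarMetric) -> bool:
    threshold = star.success_threshold_pct
    return threshold is None or score_pct >= threshold
```
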
From 92dd94642a3538424269e2a711370619f86e67e2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:19:15 +0000 Subject: [PATCH 057/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- README.md | 7 +- api.md | 59 +- src/gradientai/resources/regions/__init__.py | 42 ++ .../resources/regions/evaluation_datasets.py | 292 +++++++++ .../regions/evaluation_runs/__init__.py | 33 + .../evaluation_runs/evaluation_runs.py | 316 +++++++++ .../regions/evaluation_runs/results.py | 264 ++++++++ .../regions/evaluation_test_cases.py | 618 ++++++++++++++++++ src/gradientai/resources/regions/regions.py | 157 +++++ src/gradientai/types/__init__.py | 3 + ...region_list_evaluation_metrics_response.py | 12 + src/gradientai/types/regions/__init__.py | 26 + .../types/regions/api_star_metric_param.py | 19 + ...reate_file_upload_presigned_urls_params.py | 20 + ...ate_file_upload_presigned_urls_response.py | 30 + .../evaluation_dataset_create_params.py | 17 + .../evaluation_dataset_create_response.py | 12 + .../regions/evaluation_run_create_params.py | 17 + .../regions/evaluation_run_create_response.py | 11 + .../evaluation_run_retrieve_response.py | 12 + .../types/regions/evaluation_runs/__init__.py | 6 + .../api_evaluation_metric_result.py | 17 + .../evaluation_runs/api_evaluation_run.py | 56 ++ .../regions/evaluation_runs/api_prompt.py | 42 ++ .../result_retrieve_prompt_response.py | 12 + .../result_retrieve_response.py | 16 + .../evaluation_test_case_create_params.py | 29 + .../evaluation_test_case_create_response.py | 12 + ...n_test_case_list_evaluation_runs_params.py | 12 + ...test_case_list_evaluation_runs_response.py | 13 + .../evaluation_test_case_list_response.py | 12 + .../evaluation_test_case_retrieve_response.py | 12 + .../evaluation_test_case_update_params.py | 32 + .../evaluation_test_case_update_response.py | 14 + .../regions/evaluation_runs/__init__.py | 1 + .../regions/evaluation_runs/test_results.py | 200 ++++++ .../regions/test_evaluation_datasets.py | 211 ++++++ .../regions/test_evaluation_runs.py | 187 ++++++ .../regions/test_evaluation_test_cases.py | 486 ++++++++++++++ tests/api_resources/test_regions.py | 58 +- 41 files changed, 3390 insertions(+), 9 deletions(-) create mode 100644 src/gradientai/resources/regions/evaluation_datasets.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/__init__.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/results.py create mode 100644 src/gradientai/resources/regions/evaluation_test_cases.py create mode 100644 src/gradientai/types/region_list_evaluation_metrics_response.py create mode 100644 src/gradientai/types/regions/api_star_metric_param.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_response.py create mode 100644 src/gradientai/types/regions/evaluation_run_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_run_create_response.py create mode 100644 src/gradientai/types/regions/evaluation_run_retrieve_response.py create mode 100644 
src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py create mode 100644 src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py create mode 100644 src/gradientai/types/regions/evaluation_runs/api_prompt.py create mode 100644 src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py create mode 100644 src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_create_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_retrieve_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_update_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_update_response.py create mode 100644 tests/api_resources/regions/evaluation_runs/__init__.py create mode 100644 tests/api_resources/regions/evaluation_runs/test_results.py create mode 100644 tests/api_resources/regions/test_evaluation_datasets.py create mode 100644 tests/api_resources/regions/test_evaluation_runs.py create mode 100644 tests/api_resources/regions/test_evaluation_test_cases.py diff --git a/.stats.yml b/.stats.yml index 611b679c..f0863f5f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 58 +configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: ae932f39d93e617d3f513271503efbcf +config_hash: 211ece2994c6ac52f84f78ee56c1097a diff --git a/README.md b/README.md index bd72811f..09b1e15d 100644 --- a/README.md +++ b/README.md @@ -120,11 +120,10 @@ from gradientai import GradientAI client = GradientAI() -data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={}, +evaluation_test_case = client.regions.evaluation_test_cases.create( + star_metric={}, ) -print(data_source.aws_data_source) +print(evaluation_test_case.star_metric) ``` ## Handling errors diff --git a/api.md b/api.md index d644b609..970f6951 100644 --- a/api.md +++ b/api.md @@ -176,15 +176,31 @@ Methods: Types: ```python -from gradientai.types import APIEvaluationMetric, RegionListResponse +from gradientai.types import ( + APIEvaluationMetric, + RegionListResponse, + RegionListEvaluationMetricsResponse, +) ``` Methods: - client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse ## EvaluationRuns +Types: + +```python +from gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse +``` + +Methods: + +- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse +- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse + ### Results Types: @@ -194,17 +210,56 @@ from gradientai.types.regions.evaluation_runs import ( APIEvaluationMetricResult, APIEvaluationRun, APIPrompt, + ResultRetrieveResponse, + ResultRetrievePromptResponse, ) ``` 
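
Taken together, the methods listed in this api.md diff (including the Results methods just below) support a create-run-then-inspect loop. A hedged end-to-end sketch with placeholder UUIDs:

```python
# Evaluation workflow sketch; method names and parameters mirror the
# Methods listings in this api.md diff, UUID strings are placeholders.
from gradientai import GradientAI

client = GradientAI()

run = client.regions.evaluation_runs.create(
    agent_uuid="agent-uuid",          # placeholder
    run_name="nightly-eval",
    test_case_uuid="test-case-uuid",  # placeholder
)
results = client.regions.evaluation_runs.results.retrieve("evaluation-run-uuid")
prompt_result = client.regions.evaluation_runs.results.retrieve_prompt(
    1, evaluation_run_uuid="evaluation-run-uuid"
)
```
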
+Methods: + +- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse +- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse + ## EvaluationTestCases Types: ```python -from gradientai.types.regions import APIEvaluationTestCase, APIStarMetric +from gradientai.types.regions import ( + APIEvaluationTestCase, + APIStarMetric, + EvaluationTestCaseCreateResponse, + EvaluationTestCaseRetrieveResponse, + EvaluationTestCaseUpdateResponse, + EvaluationTestCaseListResponse, + EvaluationTestCaseListEvaluationRunsResponse, +) ``` +Methods: + +- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse +- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse + +## EvaluationDatasets + +Types: + +```python +from gradientai.types.regions import ( + EvaluationDatasetCreateResponse, + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) +``` + +Methods: + +- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse + # IndexingJobs Types: diff --git a/src/gradientai/resources/regions/__init__.py b/src/gradientai/resources/regions/__init__.py index fb9cf834..51a96d61 100644 --- a/src/gradientai/resources/regions/__init__.py +++ b/src/gradientai/resources/regions/__init__.py @@ -8,8 +8,50 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) +from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) __all__ = [ + "EvaluationRunsResource", + "AsyncEvaluationRunsResource", + "EvaluationRunsResourceWithRawResponse", + "AsyncEvaluationRunsResourceWithRawResponse", + "EvaluationRunsResourceWithStreamingResponse", + "AsyncEvaluationRunsResourceWithStreamingResponse", + "EvaluationTestCasesResource", + "AsyncEvaluationTestCasesResource", + "EvaluationTestCasesResourceWithRawResponse", + "AsyncEvaluationTestCasesResourceWithRawResponse", + "EvaluationTestCasesResourceWithStreamingResponse", + "AsyncEvaluationTestCasesResourceWithStreamingResponse", + "EvaluationDatasetsResource", + "AsyncEvaluationDatasetsResource", + "EvaluationDatasetsResourceWithRawResponse", 
+ "AsyncEvaluationDatasetsResourceWithRawResponse", + "EvaluationDatasetsResourceWithStreamingResponse", + "AsyncEvaluationDatasetsResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/gradientai/resources/regions/evaluation_datasets.py b/src/gradientai/resources/regions/evaluation_datasets.py new file mode 100644 index 00000000..f82e9701 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_datasets.py @@ -0,0 +1,292 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.regions import ( + evaluation_dataset_create_params, + evaluation_dataset_create_file_upload_presigned_urls_params, +) +from ...types.regions.evaluation_dataset_create_response import EvaluationDatasetCreateResponse +from ...types.knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam +from ...types.regions.evaluation_dataset_create_file_upload_presigned_urls_response import ( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) + +__all__ = ["EvaluationDatasetsResource", "AsyncEvaluationDatasetsResource"] + + +class EvaluationDatasetsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EvaluationDatasetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationDatasetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationDatasetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return EvaluationDatasetsResourceWithStreamingResponse(self) + + def create( + self, + *, + file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateResponse: + """ + To create an evaluation dataset, send a POST request to + `/v2/gen-ai/evaluation_datasets`. + + Args: + file_upload_dataset: File to upload as data source for knowledge base. + + name: The name of the agent evaluation dataset. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_datasets" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets", + body=maybe_transform( + { + "file_upload_dataset": file_upload_dataset, + "name": name, + }, + evaluation_dataset_create_params.EvaluationDatasetCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateResponse, + ) + + def create_file_upload_presigned_urls( + self, + *, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: + """ + To create presigned URLs for evaluation dataset file upload, send a POST request + to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls", + body=maybe_transform( + {"files": files}, + evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse, + ) + + +class AsyncEvaluationDatasetsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationDatasetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationDatasetsResourceWithStreamingResponse(self) + + async def create( + self, + *, + file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateResponse: + """ + To create an evaluation dataset, send a POST request to + `/v2/gen-ai/evaluation_datasets`. + + Args: + file_upload_dataset: File to upload as data source for knowledge base. + + name: The name of the agent evaluation dataset. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_datasets" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets", + body=await async_maybe_transform( + { + "file_upload_dataset": file_upload_dataset, + "name": name, + }, + evaluation_dataset_create_params.EvaluationDatasetCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateResponse, + ) + + async def create_file_upload_presigned_urls( + self, + *, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: + """ + To create presigned URLs for evaluation dataset file upload, send a POST request + to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls", + body=await async_maybe_transform( + {"files": files}, + evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse, + ) + + +class EvaluationDatasetsResourceWithRawResponse: + def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = to_raw_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = to_raw_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class AsyncEvaluationDatasetsResourceWithRawResponse: + def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = async_to_raw_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = async_to_raw_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class EvaluationDatasetsResourceWithStreamingResponse: + def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = to_streamed_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = to_streamed_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class AsyncEvaluationDatasetsResourceWithStreamingResponse: + def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = async_to_streamed_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = async_to_streamed_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) diff --git a/src/gradientai/resources/regions/evaluation_runs/__init__.py b/src/gradientai/resources/regions/evaluation_runs/__init__.py new file mode 100644 index 00000000..e5580dd0 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_runs/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
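
A usage sketch for the `EvaluationDatasets` resource defined above; the file-entry keys and the `file_upload_dataset` payload are assumptions here, with the authoritative shapes living in the corresponding `*_params` modules:

```python
# Hedged sketch: request presigned upload URLs, then create a dataset.
from gradientai import GradientAI

client = GradientAI()

urls = client.regions.evaluation_datasets.create_file_upload_presigned_urls(
    files=[{"file_name": "qa.csv", "file_size": "2048"}],  # assumed fields
)
dataset = client.regions.evaluation_datasets.create(
    name="my-eval-dataset",
    file_upload_dataset={},  # shape defined by APIFileUploadDataSourceParam
)
```
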
+ +from .results import ( + ResultsResource, + AsyncResultsResource, + ResultsResourceWithRawResponse, + AsyncResultsResourceWithRawResponse, + ResultsResourceWithStreamingResponse, + AsyncResultsResourceWithStreamingResponse, +) +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) + +__all__ = [ + "ResultsResource", + "AsyncResultsResource", + "ResultsResourceWithRawResponse", + "AsyncResultsResourceWithRawResponse", + "ResultsResourceWithStreamingResponse", + "AsyncResultsResourceWithStreamingResponse", + "EvaluationRunsResource", + "AsyncEvaluationRunsResource", + "EvaluationRunsResourceWithRawResponse", + "AsyncEvaluationRunsResourceWithRawResponse", + "EvaluationRunsResourceWithStreamingResponse", + "AsyncEvaluationRunsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py b/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py new file mode 100644 index 00000000..9221c45c --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py @@ -0,0 +1,316 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .results import ( + ResultsResource, + AsyncResultsResource, + ResultsResourceWithRawResponse, + AsyncResultsResourceWithRawResponse, + ResultsResourceWithStreamingResponse, + AsyncResultsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.regions import evaluation_run_create_params +from ....types.regions.evaluation_run_create_response import EvaluationRunCreateResponse +from ....types.regions.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse + +__all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"] + + +class EvaluationRunsResource(SyncAPIResource): + @cached_property + def results(self) -> ResultsResource: + return ResultsResource(self._client) + + @cached_property + def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationRunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return EvaluationRunsResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        agent_uuid: str | NotGiven = NOT_GIVEN,
+        run_name: str | NotGiven = NOT_GIVEN,
+        test_case_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationRunCreateResponse:
+        """
+        To run an evaluation test case, send a POST request to
+        `/v2/gen-ai/evaluation_runs`.
+
+        Args:
+          agent_uuid: Agent UUID to run the test case against.
+
+          run_name: The name of the run.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v2/gen-ai/evaluation_runs"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs",
+            body=maybe_transform(
+                {
+                    "agent_uuid": agent_uuid,
+                    "run_name": run_name,
+                    "test_case_uuid": test_case_uuid,
+                },
+                evaluation_run_create_params.EvaluationRunCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationRunCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        evaluation_run_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationRunRetrieveResponse:
+        """
+        To retrieve information about an existing evaluation run, send a GET request to
+        `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunRetrieveResponse, + ) + + +class AsyncEvaluationRunsResource(AsyncAPIResource): + @cached_property + def results(self) -> AsyncResultsResource: + return AsyncResultsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationRunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationRunsResourceWithStreamingResponse(self) + + async def create( + self, + *, + agent_uuid: str | NotGiven = NOT_GIVEN, + run_name: str | NotGiven = NOT_GIVEN, + test_case_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunCreateResponse: + """ + To run an evaluation test case, send a POST request to + `/v2/gen-ai/evaluation_runs`. + + Args: + agent_uuid: Agent UUID to run the test case against. + + run_name: The name of the run. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_runs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs", + body=await async_maybe_transform( + { + "agent_uuid": agent_uuid, + "run_name": run_name, + "test_case_uuid": test_case_uuid, + }, + evaluation_run_create_params.EvaluationRunCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunCreateResponse, + ) + + async def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationRunRetrieveResponse:
+        """
+        To retrieve information about an existing evaluation run, send a GET request to
+        `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not evaluation_run_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+            )
+        return await self._get(
+            f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationRunRetrieveResponse,
+        )
+
+
+class EvaluationRunsResourceWithRawResponse:
+    def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
+        self._evaluation_runs = evaluation_runs
+
+        self.create = to_raw_response_wrapper(
+            evaluation_runs.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            evaluation_runs.retrieve,
+        )
+
+    @cached_property
+    def results(self) -> ResultsResourceWithRawResponse:
+        return ResultsResourceWithRawResponse(self._evaluation_runs.results)
+
+
+class AsyncEvaluationRunsResourceWithRawResponse:
+    def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
+        self._evaluation_runs = evaluation_runs
+
+        self.create = async_to_raw_response_wrapper(
+            evaluation_runs.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            evaluation_runs.retrieve,
+        )
+
+    @cached_property
+    def results(self) -> AsyncResultsResourceWithRawResponse:
+        return AsyncResultsResourceWithRawResponse(self._evaluation_runs.results)
+
+
+class EvaluationRunsResourceWithStreamingResponse:
+    def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
+        self._evaluation_runs = evaluation_runs
+
+        self.create = to_streamed_response_wrapper(
+            evaluation_runs.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            evaluation_runs.retrieve,
+        )
+
+    @cached_property
+    def results(self) -> ResultsResourceWithStreamingResponse:
+        return ResultsResourceWithStreamingResponse(self._evaluation_runs.results)
+
+
+class AsyncEvaluationRunsResourceWithStreamingResponse:
+    def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
+        self._evaluation_runs = evaluation_runs
+
+        self.create = async_to_streamed_response_wrapper(
+            evaluation_runs.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            evaluation_runs.retrieve,
+        )
+
+    @cached_property
+    def results(self) -> AsyncResultsResourceWithStreamingResponse:
+        return AsyncResultsResourceWithStreamingResponse(self._evaluation_runs.results)
diff --git a/src/gradientai/resources/regions/evaluation_runs/results.py b/src/gradientai/resources/regions/evaluation_runs/results.py
new file mode 100644
index 00000000..ad74a778
--- /dev/null
+++ b/src/gradientai/resources/regions/evaluation_runs/results.py
@@ -0,0 +1,264 @@
+# File generated from our OpenAPI spec by Stainless.
See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.regions.evaluation_runs.result_retrieve_response import ResultRetrieveResponse +from ....types.regions.evaluation_runs.result_retrieve_prompt_response import ResultRetrievePromptResponse + +__all__ = ["ResultsResource", "AsyncResultsResource"] + + +class ResultsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ResultsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ResultsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ResultsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ResultsResourceWithStreamingResponse(self) + + def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResultRetrieveResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ResultRetrieveResponse, + ) + + def retrieve_prompt( + self, + prompt_id: int, + *, + evaluation_run_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResultRetrievePromptResponse:
+        """
+        To retrieve results for a specific prompt in an evaluation run, send a GET request to
+        `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not evaluation_run_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+            )
+        return self._get(
+            f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ResultRetrievePromptResponse,
+        )
+
+
+class AsyncResultsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncResultsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncResultsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncResultsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncResultsResourceWithStreamingResponse(self)
+
+    async def retrieve(
+        self,
+        evaluation_run_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResultRetrieveResponse:
+        """
+        To retrieve results of an evaluation run, send a GET request to
+        `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not evaluation_run_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+            )
+        return await self._get(
+            f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ResultRetrieveResponse,
+        )
+
+    async def retrieve_prompt(
+        self,
+        prompt_id: int,
+        *,
+        evaluation_run_uuid: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ResultRetrievePromptResponse:
+        """
+        To retrieve results for a specific prompt in an evaluation run, send a GET request to
+        `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not evaluation_run_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+            )
+        return await self._get(
+            f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ResultRetrievePromptResponse,
+        )
+
+
+class ResultsResourceWithRawResponse:
+    def __init__(self, results: ResultsResource) -> None:
+        self._results = results
+
+        self.retrieve = to_raw_response_wrapper(
+            results.retrieve,
+        )
+        self.retrieve_prompt = to_raw_response_wrapper(
+            results.retrieve_prompt,
+        )
+
+
+class AsyncResultsResourceWithRawResponse:
+    def __init__(self, results: AsyncResultsResource) -> None:
+        self._results = results
+
+        self.retrieve = async_to_raw_response_wrapper(
+            results.retrieve,
+        )
+        self.retrieve_prompt = async_to_raw_response_wrapper(
+            results.retrieve_prompt,
+        )
+
+
+class ResultsResourceWithStreamingResponse:
+    def __init__(self, results: ResultsResource) -> None:
+        self._results = results
+
+        self.retrieve = to_streamed_response_wrapper(
+            results.retrieve,
+        )
+        self.retrieve_prompt = to_streamed_response_wrapper(
+            results.retrieve_prompt,
+        )
+
+
+class AsyncResultsResourceWithStreamingResponse:
+    def __init__(self, results: AsyncResultsResource) -> None:
+        self._results = results
+
+        self.retrieve = async_to_streamed_response_wrapper(
+            results.retrieve,
+        )
+        self.retrieve_prompt = async_to_streamed_response_wrapper(
+            results.retrieve_prompt,
+        )
diff --git a/src/gradientai/resources/regions/evaluation_test_cases.py b/src/gradientai/resources/regions/evaluation_test_cases.py
new file mode 100644
index 00000000..eed4d8b4
--- /dev/null
+++ b/src/gradientai/resources/regions/evaluation_test_cases.py
@@ -0,0 +1,618 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.regions import (
+    evaluation_test_case_create_params,
+    evaluation_test_case_update_params,
+    evaluation_test_case_list_evaluation_runs_params,
+)
+from ...types.regions.api_star_metric_param import APIStarMetricParam
+from ...types.regions.evaluation_test_case_list_response import EvaluationTestCaseListResponse
+from ...types.regions.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse
+from ...types.regions.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse
+from ...types.regions.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse
+from ...types.regions.evaluation_test_case_list_evaluation_runs_response import (
+    EvaluationTestCaseListEvaluationRunsResponse,
+)
+
+__all__ = ["EvaluationTestCasesResource", "AsyncEvaluationTestCasesResource"]
+
+
+class EvaluationTestCasesResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> EvaluationTestCasesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return EvaluationTestCasesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> EvaluationTestCasesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return EvaluationTestCasesResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        dataset_uuid: str | NotGiven = NOT_GIVEN,
+        description: str | NotGiven = NOT_GIVEN,
+        metrics: List[str] | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN,
+        workspace_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseCreateResponse:
+        """
+        To create an evaluation test-case, send a POST request to
+        `/v2/gen-ai/evaluation_test_cases`.
+
+        Args:
+          dataset_uuid: Dataset against which the test‑case is executed.
+
+          description: Description of the test case.
+
+          metrics: Full metric list to use for evaluation test case.
+
+          name: Name of the test case.
+
+          workspace_uuid: The workspace uuid.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v2/gen-ai/evaluation_test_cases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+            body=maybe_transform(
+                {
+                    "dataset_uuid": dataset_uuid,
+                    "description": description,
+                    "metrics": metrics,
+                    "name": name,
+                    "star_metric": star_metric,
+                    "workspace_uuid": workspace_uuid,
+                },
+                evaluation_test_case_create_params.EvaluationTestCaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationTestCaseCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        test_case_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseRetrieveResponse:
+        """
+        To retrieve information about an existing evaluation test case, send a GET
+        request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not test_case_uuid:
+            raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}")
+        return self._get(
+            f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationTestCaseRetrieveResponse,
+        )
+
+    def update(
+        self,
+        path_test_case_uuid: str,
+        *,
+        dataset_uuid: str | NotGiven = NOT_GIVEN,
+        description: str | NotGiven = NOT_GIVEN,
+        metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN,
+        body_test_case_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseUpdateResponse:
+        """
+        To update an evaluation test-case, send a POST request to
+        `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+        Args:
+          dataset_uuid: Dataset against which the test‑case is executed.
+
+          description: Description of the test case.
+
+          name: Name of the test case.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", + body=maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "body_test_case_uuid": body_test_case_uuid, + }, + evaluation_test_case_update_params.EvaluationTestCaseUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseUpdateResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListResponse: + """ + To list all evaluation test cases, send a GET request to + `/v2/gen-ai/evaluation_test_cases`. + """ + return self._get( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseListResponse, + ) + + def list_evaluation_runs( + self, + evaluation_test_case_uuid: str, + *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListEvaluationRunsResponse: + """ + To list all evaluation runs by test case, send a GET request to + `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`. + + Args: + evaluation_test_case_version: Version of the test case. 
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not evaluation_test_case_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}"
+            )
+        return self._get(
+            f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {"evaluation_test_case_version": evaluation_test_case_version},
+                    evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams,
+                ),
+            ),
+            cast_to=EvaluationTestCaseListEvaluationRunsResponse,
+        )
+
+
+class AsyncEvaluationTestCasesResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncEvaluationTestCasesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncEvaluationTestCasesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncEvaluationTestCasesResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        dataset_uuid: str | NotGiven = NOT_GIVEN,
+        description: str | NotGiven = NOT_GIVEN,
+        metrics: List[str] | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN,
+        workspace_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseCreateResponse:
+        """
+        To create an evaluation test-case, send a POST request to
+        `/v2/gen-ai/evaluation_test_cases`.
+
+        Args:
+          dataset_uuid: Dataset against which the test‑case is executed.
+
+          description: Description of the test case.
+
+          metrics: Full metric list to use for evaluation test case.
+
+          name: Name of the test case.
+
+          workspace_uuid: The workspace uuid.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v2/gen-ai/evaluation_test_cases"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases",
+            body=await async_maybe_transform(
+                {
+                    "dataset_uuid": dataset_uuid,
+                    "description": description,
+                    "metrics": metrics,
+                    "name": name,
+                    "star_metric": star_metric,
+                    "workspace_uuid": workspace_uuid,
+                },
+                evaluation_test_case_create_params.EvaluationTestCaseCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationTestCaseCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        test_case_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseRetrieveResponse:
+        """
+        To retrieve information about an existing evaluation test case, send a GET
+        request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not test_case_uuid:
+            raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=EvaluationTestCaseRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_test_case_uuid: str,
+        *,
+        dataset_uuid: str | NotGiven = NOT_GIVEN,
+        description: str | NotGiven = NOT_GIVEN,
+        metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN,
+        body_test_case_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> EvaluationTestCaseUpdateResponse:
+        """
+        To update an evaluation test-case, send a POST request to
+        `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`.
+
+        Args:
+          dataset_uuid: Dataset against which the test‑case is executed.
+
+          description: Description of the test case.
+
+          name: Name of the test case.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", + body=await async_maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "body_test_case_uuid": body_test_case_uuid, + }, + evaluation_test_case_update_params.EvaluationTestCaseUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseUpdateResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListResponse: + """ + To list all evaluation test cases, send a GET request to + `/v2/gen-ai/evaluation_test_cases`. + """ + return await self._get( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseListResponse, + ) + + async def list_evaluation_runs( + self, + evaluation_test_case_uuid: str, + *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListEvaluationRunsResponse: + """ + To list all evaluation runs by test case, send a GET request to + `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`. + + Args: + evaluation_test_case_version: Version of the test case. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"evaluation_test_case_version": evaluation_test_case_version}, + evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams, + ), + ), + cast_to=EvaluationTestCaseListEvaluationRunsResponse, + ) + + +class EvaluationTestCasesResourceWithRawResponse: + def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = to_raw_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = to_raw_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = to_raw_response_wrapper( + evaluation_test_cases.update, + ) + self.list = to_raw_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = to_raw_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class AsyncEvaluationTestCasesResourceWithRawResponse: + def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = async_to_raw_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = async_to_raw_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = async_to_raw_response_wrapper( + evaluation_test_cases.update, + ) + self.list = async_to_raw_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = async_to_raw_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class EvaluationTestCasesResourceWithStreamingResponse: + def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = to_streamed_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = to_streamed_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = to_streamed_response_wrapper( + evaluation_test_cases.update, + ) + self.list = to_streamed_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = to_streamed_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class AsyncEvaluationTestCasesResourceWithStreamingResponse: + def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = async_to_streamed_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + evaluation_test_cases.update, + ) + self.list = async_to_streamed_response_wrapper( + evaluation_test_cases.list, + ) + 
self.list_evaluation_runs = async_to_streamed_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) diff --git a/src/gradientai/resources/regions/regions.py b/src/gradientai/resources/regions/regions.py index 6662e80a..5f74b2e8 100644 --- a/src/gradientai/resources/regions/regions.py +++ b/src/gradientai/resources/regions/regions.py @@ -16,12 +16,49 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) from ...types.region_list_response import RegionListResponse +from .evaluation_runs.evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) +from ...types.region_list_evaluation_metrics_response import RegionListEvaluationMetricsResponse __all__ = ["RegionsResource", "AsyncRegionsResource"] class RegionsResource(SyncAPIResource): + @cached_property + def evaluation_runs(self) -> EvaluationRunsResource: + return EvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResource: + return EvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResource: + return EvaluationDatasetsResource(self._client) + @cached_property def with_raw_response(self) -> RegionsResourceWithRawResponse: """ @@ -89,8 +126,44 @@ def list( cast_to=RegionListResponse, ) + def list_evaluation_metrics( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListEvaluationMetricsResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. 
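+
+        Example (a minimal sketch; assumes a configured `GradientAI` client named
+        `client`):
+
+            response = client.regions.list_evaluation_metrics()
+            print(response.metrics)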
+ """ + return self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RegionListEvaluationMetricsResponse, + ) + class AsyncRegionsResource(AsyncAPIResource): + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResource: + return AsyncEvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource: + return AsyncEvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource: + return AsyncEvaluationDatasetsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: """ @@ -158,6 +231,30 @@ async def list( cast_to=RegionListResponse, ) + async def list_evaluation_metrics( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListEvaluationMetricsResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. + """ + return await self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RegionListEvaluationMetricsResponse, + ) + class RegionsResourceWithRawResponse: def __init__(self, regions: RegionsResource) -> None: @@ -166,6 +263,21 @@ def __init__(self, regions: RegionsResource) -> None: self.list = to_raw_response_wrapper( regions.list, ) + self.list_evaluation_metrics = to_raw_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse: + return EvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse: + return EvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse: + return EvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) class AsyncRegionsResourceWithRawResponse: @@ -175,6 +287,21 @@ def __init__(self, regions: AsyncRegionsResource) -> None: self.list = async_to_raw_response_wrapper( regions.list, ) + self.list_evaluation_metrics = async_to_raw_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse: + return AsyncEvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: + return AsyncEvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> 
AsyncEvaluationDatasetsResourceWithRawResponse: + return AsyncEvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) class RegionsResourceWithStreamingResponse: @@ -184,6 +311,21 @@ def __init__(self, regions: RegionsResource) -> None: self.list = to_streamed_response_wrapper( regions.list, ) + self.list_evaluation_metrics = to_streamed_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse: + return EvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse: + return EvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse: + return EvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) class AsyncRegionsResourceWithStreamingResponse: @@ -193,3 +335,18 @@ def __init__(self, regions: AsyncRegionsResource) -> None: self.list = async_to_streamed_response_wrapper( regions.list, ) + self.list_evaluation_metrics = async_to_streamed_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: + return AsyncEvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse: + return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: + return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 22414733..d09aaa2a 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -45,6 +45,9 @@ from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .region_list_evaluation_metrics_response import ( + RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, +) from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, ) diff --git a/src/gradientai/types/region_list_evaluation_metrics_response.py b/src/gradientai/types/region_list_evaluation_metrics_response.py new file mode 100644 index 00000000..c57b71d1 --- /dev/null +++ b/src/gradientai/types/region_list_evaluation_metrics_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
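+# A sketch of consuming this model (assumes a configured `GradientAI` client named
+# `client`; see the regions resource above):
+#     metrics = client.regions.list_evaluation_metrics().metrics or []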
+ +from typing import List, Optional + +from .._models import BaseModel +from .api_evaluation_metric import APIEvaluationMetric + +__all__ = ["RegionListEvaluationMetricsResponse"] + + +class RegionListEvaluationMetricsResponse(BaseModel): + metrics: Optional[List[APIEvaluationMetric]] = None diff --git a/src/gradientai/types/regions/__init__.py b/src/gradientai/types/regions/__init__.py index 83b21099..695ba3b4 100644 --- a/src/gradientai/types/regions/__init__.py +++ b/src/gradientai/types/regions/__init__.py @@ -3,4 +3,30 @@ from __future__ import annotations from .api_star_metric import APIStarMetric as APIStarMetric +from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase +from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams +from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse +from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams +from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse +from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse +from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams +from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse +from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams +from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse +from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse +from .evaluation_test_case_retrieve_response import ( + EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, +) +from .evaluation_test_case_list_evaluation_runs_params import ( + EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams, +) +from .evaluation_test_case_list_evaluation_runs_response import ( + EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse, +) +from .evaluation_dataset_create_file_upload_presigned_urls_params import ( + EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams, +) +from .evaluation_dataset_create_file_upload_presigned_urls_response import ( + EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) diff --git a/src/gradientai/types/regions/api_star_metric_param.py b/src/gradientai/types/regions/api_star_metric_param.py new file mode 100644 index 00000000..5f7b2fd9 --- /dev/null +++ b/src/gradientai/types/regions/api_star_metric_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIStarMetricParam"] + + +class APIStarMetricParam(TypedDict, total=False): + metric_uuid: str + + name: str + + success_threshold_pct: int + """ + The success threshold for the star metric. This is a percentage value between 0 + and 100. 
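+    For example, `success_threshold_pct=80` sets an 80% threshold (illustrative value).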
+ """ diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py new file mode 100644 index 00000000..6aa6d27a --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import TypedDict + +__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsParams", "File"] + + +class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=False): + files: Iterable[File] + """A list of files to generate presigned URLs for.""" + + +class File(TypedDict, total=False): + file_name: str + + file_size: str + """The size of the file in bytes.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py new file mode 100644 index 00000000..bee94c93 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsResponse", "Upload"] + + +class Upload(BaseModel): + expires_at: Optional[datetime] = None + """The time the url expires at.""" + + object_key: Optional[str] = None + """The unique object key to store the file as.""" + + original_file_name: Optional[str] = None + """The original file name.""" + + presigned_url: Optional[str] = None + """The actual presigned URL the client can use to upload the file directly.""" + + +class EvaluationDatasetCreateFileUploadPresignedURLsResponse(BaseModel): + request_id: Optional[str] = None + """The ID generated for the request for Presigned URLs.""" + + uploads: Optional[List[Upload]] = None + """A list of generated presigned URLs and object keys, one per file.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_params.py b/src/gradientai/types/regions/evaluation_dataset_create_params.py new file mode 100644 index 00000000..c8a84c23 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from ..knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam + +__all__ = ["EvaluationDatasetCreateParams"] + + +class EvaluationDatasetCreateParams(TypedDict, total=False): + file_upload_dataset: APIFileUploadDataSourceParam + """File to upload as data source for knowledge base.""" + + name: str + """The name of the agent evaluation dataset.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_response.py b/src/gradientai/types/regions/evaluation_dataset_create_response.py new file mode 100644 index 00000000..f5c7fbac --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationDatasetCreateResponse"] + + +class EvaluationDatasetCreateResponse(BaseModel): + evaluation_dataset_uuid: Optional[str] = None + """Evaluation dataset uuid.""" diff --git a/src/gradientai/types/regions/evaluation_run_create_params.py b/src/gradientai/types/regions/evaluation_run_create_params.py new file mode 100644 index 00000000..1ae2dbbb --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationRunCreateParams"] + + +class EvaluationRunCreateParams(TypedDict, total=False): + agent_uuid: str + """Agent UUID to run the test case against.""" + + run_name: str + """The name of the run.""" + + test_case_uuid: str diff --git a/src/gradientai/types/regions/evaluation_run_create_response.py b/src/gradientai/types/regions/evaluation_run_create_response.py new file mode 100644 index 00000000..36942c29 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationRunCreateResponse"] + + +class EvaluationRunCreateResponse(BaseModel): + evaluation_run_uuid: Optional[str] = None diff --git a/src/gradientai/types/regions/evaluation_run_retrieve_response.py b/src/gradientai/types/regions/evaluation_run_retrieve_response.py new file mode 100644 index 00000000..68d71978 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .evaluation_runs.api_evaluation_run import APIEvaluationRun + +__all__ = ["EvaluationRunRetrieveResponse"] + + +class EvaluationRunRetrieveResponse(BaseModel): + evaluation_run: Optional[APIEvaluationRun] = None diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/gradientai/types/regions/evaluation_runs/__init__.py index f8ee8b14..0ec4f2f6 100644 --- a/src/gradientai/types/regions/evaluation_runs/__init__.py +++ b/src/gradientai/types/regions/evaluation_runs/__init__.py @@ -1,3 +1,9 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations + +from .api_prompt import APIPrompt as APIPrompt +from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun +from .result_retrieve_response import ResultRetrieveResponse as ResultRetrieveResponse +from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult +from .result_retrieve_prompt_response import ResultRetrievePromptResponse as ResultRetrievePromptResponse diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py new file mode 100644 index 00000000..cb50fd80 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
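+# A metric result carries either a numeric or a string value; a reading sketch
+# (attribute names follow the model below):
+#     value = result.number_value if result.number_value is not None else result.string_value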
+ +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APIEvaluationMetricResult"] + + +class APIEvaluationMetricResult(BaseModel): + metric_name: Optional[str] = None + + number_value: Optional[float] = None + """The value of the metric as a number.""" + + string_value: Optional[str] = None + """The value of the metric as a string.""" diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py b/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py new file mode 100644 index 00000000..7822f53c --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ...._models import BaseModel +from .api_evaluation_metric_result import APIEvaluationMetricResult + +__all__ = ["APIEvaluationRun"] + + +class APIEvaluationRun(BaseModel): + agent_uuid: Optional[str] = None + """Agent UUID.""" + + agent_version_hash: Optional[str] = None + + evaluation_run_uuid: Optional[str] = None + """Evaluation run UUID.""" + + finished_at: Optional[datetime] = None + """Run end time.""" + + pass_status: Optional[bool] = None + """The pass status of the evaluation run based on the star metric.""" + + run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None + + run_name: Optional[str] = None + """Run name.""" + + star_metric_result: Optional[APIEvaluationMetricResult] = None + + started_at: Optional[datetime] = None + """Run start time.""" + + status: Optional[ + Literal[ + "EVALUATION_RUN_STATUS_UNSPECIFIED", + "EVALUATION_RUN_QUEUED", + "EVALUATION_RUN_RUNNING_DATASET", + "EVALUATION_RUN_EVALUATING_RESULTS", + "EVALUATION_RUN_CANCELLING", + "EVALUATION_RUN_CANCELLED", + "EVALUATION_RUN_SUCCESSFUL", + "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", + "EVALUATION_RUN_FAILED", + ] + ] = None + + test_case_uuid: Optional[str] = None + """Test-case UUID.""" + + test_case_version: Optional[int] = None + """Test-case-version.""" diff --git a/src/gradientai/types/regions/evaluation_runs/api_prompt.py b/src/gradientai/types/regions/evaluation_runs/api_prompt.py new file mode 100644 index 00000000..fb5a51f4 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_prompt.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
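+# A sketch of inspecting prompt-level results (attribute names follow the models below;
+# `prompt` would come from e.g. `results.retrieve_prompt(...)`):
+#     for chunk in prompt.prompt_chunks or []:
+#         if chunk.chunk_used:
+#             print(chunk.source_name, chunk.chunk_usage_pct)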
+ +from typing import List, Optional + +from ...._models import BaseModel +from .api_evaluation_metric_result import APIEvaluationMetricResult + +__all__ = ["APIPrompt", "PromptChunk"] + + +class PromptChunk(BaseModel): + chunk_usage_pct: Optional[float] = None + """The usage percentage of the chunk.""" + + chunk_used: Optional[bool] = None + """Indicates if the chunk was used in the prompt.""" + + index_uuid: Optional[str] = None + """The index uuid (Knowledge Base) of the chunk.""" + + source_name: Optional[str] = None + """The source name for the chunk, e.g., the file name or document title.""" + + text: Optional[str] = None + """Text content of the chunk.""" + + +class APIPrompt(BaseModel): + ground_truth: Optional[str] = None + """The ground truth for the prompt.""" + + input: Optional[str] = None + + output: Optional[str] = None + + prompt_chunks: Optional[List[PromptChunk]] = None + """The list of prompt chunks.""" + + prompt_id: Optional[int] = None + + prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None + """The metric results for the prompt.""" diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py new file mode 100644 index 00000000..ebebee48 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .api_prompt import APIPrompt + +__all__ = ["ResultRetrievePromptResponse"] + + +class ResultRetrievePromptResponse(BaseModel): + prompt: Optional[APIPrompt] = None diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py new file mode 100644 index 00000000..27256353 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from .api_prompt import APIPrompt +from .api_evaluation_run import APIEvaluationRun + +__all__ = ["ResultRetrieveResponse"] + + +class ResultRetrieveResponse(BaseModel): + evaluation_run: Optional[APIEvaluationRun] = None + + prompts: Optional[List[APIPrompt]] = None + """The prompt level results.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_create_params.py b/src/gradientai/types/regions/evaluation_test_case_create_params.py new file mode 100644 index 00000000..51ce20c7 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_create_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
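+# An illustrative payload matching this TypedDict (all values are placeholders):
+#     {
+#         "name": "my-test-case",
+#         "description": "Smoke test",
+#         "dataset_uuid": "...",
+#         "metrics": ["..."],
+#         "star_metric": {"metric_uuid": "...", "name": "accuracy", "success_threshold_pct": 80},
+#         "workspace_uuid": "...",
+#     }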
+ +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +from .api_star_metric_param import APIStarMetricParam + +__all__ = ["EvaluationTestCaseCreateParams"] + + +class EvaluationTestCaseCreateParams(TypedDict, total=False): + dataset_uuid: str + """Dataset against which the test‑case is executed.""" + + description: str + """Description of the test case.""" + + metrics: List[str] + """Full metric list to use for evaluation test case.""" + + name: str + """Name of the test case.""" + + star_metric: APIStarMetricParam + + workspace_uuid: str + """The workspace uuid.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_create_response.py b/src/gradientai/types/regions/evaluation_test_case_create_response.py new file mode 100644 index 00000000..9f8e37f4 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationTestCaseCreateResponse"] + + +class EvaluationTestCaseCreateResponse(BaseModel): + test_case_uuid: Optional[str] = None + """Test‑case UUID.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py new file mode 100644 index 00000000..7f30ee28 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationTestCaseListEvaluationRunsParams"] + + +class EvaluationTestCaseListEvaluationRunsParams(TypedDict, total=False): + evaluation_test_case_version: int + """Version of the test case.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py new file mode 100644 index 00000000..4233d0ec --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .evaluation_runs.api_evaluation_run import APIEvaluationRun + +__all__ = ["EvaluationTestCaseListEvaluationRunsResponse"] + + +class EvaluationTestCaseListEvaluationRunsResponse(BaseModel): + evaluation_runs: Optional[List[APIEvaluationRun]] = None + """List of evaluation runs.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_response.py b/src/gradientai/types/regions/evaluation_test_case_list_response.py new file mode 100644 index 00000000..ccfc263e --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+from .api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["EvaluationTestCaseListResponse"]
+
+
+class EvaluationTestCaseListResponse(BaseModel):
+    evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None
diff --git a/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py
new file mode 100644
index 00000000..1511ba74
--- /dev/null
+++ b/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["EvaluationTestCaseRetrieveResponse"]
+
+
+class EvaluationTestCaseRetrieveResponse(BaseModel):
+    evaluation_test_case: Optional[APIEvaluationTestCase] = None
diff --git a/src/gradientai/types/regions/evaluation_test_case_update_params.py b/src/gradientai/types/regions/evaluation_test_case_update_params.py
new file mode 100644
index 00000000..be70fc95
--- /dev/null
+++ b/src/gradientai/types/regions/evaluation_test_case_update_params.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Annotated, TypedDict
+
+from ..._utils import PropertyInfo
+from .api_star_metric_param import APIStarMetricParam
+
+__all__ = ["EvaluationTestCaseUpdateParams", "Metrics"]
+
+
+class EvaluationTestCaseUpdateParams(TypedDict, total=False):
+    dataset_uuid: str
+    """Dataset against which the test‑case is executed."""
+
+    description: str
+    """Description of the test case."""
+
+    metrics: Metrics
+
+    name: str
+    """Name of the test case."""
+
+    star_metric: APIStarMetricParam
+
+    body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")]
+
+
+class Metrics(TypedDict, total=False):
+    metric_uuids: List[str]
diff --git a/src/gradientai/types/regions/evaluation_test_case_update_response.py b/src/gradientai/types/regions/evaluation_test_case_update_response.py
new file mode 100644
index 00000000..6f8e3b04
--- /dev/null
+++ b/src/gradientai/types/regions/evaluation_test_case_update_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["EvaluationTestCaseUpdateResponse"]
+
+
+class EvaluationTestCaseUpdateResponse(BaseModel):
+    test_case_uuid: Optional[str] = None
+
+    version: Optional[int] = None
+    """The new version of the test case."""
diff --git a/tests/api_resources/regions/evaluation_runs/__init__.py b/tests/api_resources/regions/evaluation_runs/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/regions/evaluation_runs/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py
new file mode 100644
index 00000000..29deb8b2
--- /dev/null
+++ b/tests/api_resources/regions/evaluation_runs/test_results.py
@@ -0,0 +1,200 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
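One detail worth calling out in `EvaluationTestCaseUpdateParams`: `body_test_case_uuid` is aliased to `test_case_uuid` via `PropertyInfo`, which lets the body field coexist with the identically named path parameter. A sketch with placeholder values:

```python
from gradientai import GradientAI

client = GradientAI()

# `path_test_case_uuid` selects the resource in the URL; `body_test_case_uuid`
# is serialized into the JSON body as `test_case_uuid` via the alias.
updated = client.regions.evaluation_test_cases.update(
    path_test_case_uuid="example-test-case-uuid",
    body_test_case_uuid="example-test-case-uuid",
    description="tightened success thresholds",
    metrics={"metric_uuids": ["example-metric-uuid"]},
)

# Updates produce a new test case version; the response reports it.
print(updated.version)
```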
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestResults: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + result = client.regions.evaluation_runs.results.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.results.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.results.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.regions.evaluation_runs.results.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_prompt(self, client: GradientAI) -> None: + result = client.regions.evaluation_runs.results.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_prompt(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_prompt(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_prompt(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` 
but received ''"): + client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="", + ) + + +class TestAsyncResults: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + result = await async_client.regions.evaluation_runs.results.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = await response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = await response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + result = await async_client.regions.evaluation_runs.results.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = await response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = await response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="", + ) diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/regions/test_evaluation_datasets.py new file mode 100644 index 00000000..3e3da0fe --- /dev/null +++ b/tests/api_resources/regions/test_evaluation_datasets.py @@ -0,0 +1,211 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationDatasetCreateResponse, + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationDatasets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create( + file_upload_dataset={ + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + name="name", + ) + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_datasets.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.regions.evaluation_datasets.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_method_create_file_upload_presigned_urls_with_all_params(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls( + files=[ + { + "file_name": "file_name", + "file_size": "file_size", + } + ], + ) + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + 
@parametrize + def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + response = client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + with client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncEvaluationDatasets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create( + file_upload_dataset={ + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + name="name", + ) + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_datasets.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = await response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_datasets.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = await response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_file_upload_presigned_urls_with_all_params( + self, async_client: AsyncGradientAI + ) -> None: + 
evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls( + files=[ + { + "file_name": "file_name", + "file_size": "file_size", + } + ], + ) + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = await response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + async with ( + async_client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = await response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/regions/test_evaluation_runs.py new file mode 100644 index 00000000..b2d3c634 --- /dev/null +++ b/tests/api_resources/regions/test_evaluation_runs.py @@ -0,0 +1,187 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
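The dataset tests above imply a two-step upload flow: request presigned URLs, upload the file out of band, then register the stored object as a dataset. A sketch under those assumptions; the file names, sizes, and object key are invented, and the link between the presigned-URL response and `stored_object_key` is inferred from the parameter shapes rather than stated in this patch:

```python
from gradientai import GradientAI

client = GradientAI()

# Step 1: request presigned URLs for the files to be uploaded.
presigned = client.regions.evaluation_datasets.create_file_upload_presigned_urls(
    files=[
        {
            "file_name": "golden_answers.csv",
            "file_size": "12345",  # string-typed in the params model
        }
    ],
)
# (Upload the file bytes to the returned URL with any HTTP client here.)

# Step 2: create the dataset from the stored object.
dataset = client.regions.evaluation_datasets.create(
    file_upload_dataset={
        "original_file_name": "golden_answers.csv",
        "size_in_bytes": "12345",
        "stored_object_key": "example-object-key",
    },
    name="golden-answers",
)
print(dataset)
```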
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationRunCreateResponse, + EvaluationRunRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationRuns: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.create() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.create( + agent_uuid="agent_uuid", + run_name="run_name", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.regions.evaluation_runs.with_raw_response.retrieve( + "", + ) + + +class TestAsyncEvaluationRuns: + 
parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.create() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.create( + agent_uuid="agent_uuid", + run_name="run_name", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.with_raw_response.retrieve( + "", + ) diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/regions/test_evaluation_test_cases.py new file mode 
100644 index 00000000..a01ace90 --- /dev/null +++ b/tests/api_resources/regions/test_evaluation_test_cases.py @@ -0,0 +1,486 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationTestCaseListResponse, + EvaluationTestCaseCreateResponse, + EvaluationTestCaseUpdateResponse, + EvaluationTestCaseRetrieveResponse, + EvaluationTestCaseListEvaluationRunsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationTestCases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.create() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.create( + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + workspace_uuid="workspace_uuid", + ) + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.retrieve( + "test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.retrieve( + "test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.retrieve( + "test_case_uuid", + ) 
as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): + client.regions.evaluation_test_cases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + description="description", + metrics={"metric_uuids": ["string"]}, + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + body_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.update( + path_test_case_uuid="test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): + client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + 
def test_streaming_response_list(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_runs(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_version=0, + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_evaluation_runs(self, client: GradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" + ): + client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="", + ) + + +class TestAsyncEvaluationTestCases: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.create() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.create( + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", + star_metric={ + 
"metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + workspace_uuid="workspace_uuid", + ) + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.retrieve( + "test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + "test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.retrieve( + "test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): + await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + 
description="description", + metrics={"metric_uuids": ["string"]}, + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + body_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.update( + path_test_case_uuid="test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): + await async_client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.list() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_runs_with_all_params(self, async_client: 
AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_version=0, + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" + ): + await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="", + ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 8e25617f..9cb24b0a 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import RegionListResponse +from gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -54,6 +54,34 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_metrics(self, client: GradientAI) -> None: + region = client.regions.list_evaluation_metrics() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_evaluation_metrics(self, client: GradientAI) -> None: + response = client.regions.with_raw_response.list_evaluation_metrics() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_evaluation_metrics(self, client: GradientAI) -> None: + with client.regions.with_streaming_response.list_evaluation_metrics() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + + region = response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True + class TestAsyncRegions: parametrize = pytest.mark.parametrize( @@ -96,3 +124,31 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(RegionListResponse, region, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + region = await async_client.regions.list_evaluation_metrics() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.with_raw_response.list_evaluation_metrics() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = await response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.with_streaming_response.list_evaluation_metrics() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = await response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True From 8bf3c0ba0d36e988feff14a2e2032a53a59c1642 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:19:45 +0000 Subject: [PATCH 058/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 46 +++++----- src/gradientai/_client.py | 39 +-------- src/gradientai/resources/__init__.py | 14 --- .../resources/knowledge_bases/__init__.py | 14 +++ .../{ => knowledge_bases}/indexing_jobs.py | 28 +++--- .../knowledge_bases/knowledge_bases.py | 32 +++++++ src/gradientai/types/__init__.py | 11 --- src/gradientai/types/api_knowledge_base.py | 2 +- .../types/knowledge_bases/__init__.py | 11 +++ .../{ => knowledge_bases}/api_indexing_job.py | 2 +- .../api_knowledge_base_data_source.py | 2 +- .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 6 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 +- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../test_indexing_jobs.py | 86 +++++++++---------- 21 files changed, 152 insertions(+), 153 deletions(-) rename src/gradientai/resources/{ => knowledge_bases}/indexing_jobs.py (95%) rename src/gradientai/types/{ => knowledge_bases}/api_indexing_job.py (96%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_create_params.py (100%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_create_response.py (89%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_list_params.py (100%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_list_response.py (77%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_data_sources_response.py (97%) 
rename src/gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_response.py (89%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_params.py (91%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_response.py (90%) rename tests/api_resources/{ => knowledge_bases}/test_indexing_jobs.py (80%) diff --git a/.stats.yml b/.stats.yml index f0863f5f..b756ab92 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 211ece2994c6ac52f84f78ee56c1097a +config_hash: 4d0e9e07e7ac5a666632cffb655d028c diff --git a/api.md b/api.md index 970f6951..d14ceec2 100644 --- a/api.md +++ b/api.md @@ -260,29 +260,6 @@ Methods: - client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse - client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse -# IndexingJobs - -Types: - -```python -from gradientai.types import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # KnowledgeBases Types: @@ -329,6 +306,29 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +## IndexingJobs + +Types: + +```python +from gradientai.types.knowledge_bases import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # Chat ## Completions diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 71db35bc..00025498 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,11 +31,10 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from 
.resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource @@ -127,12 +126,6 @@ def regions(self) -> RegionsResource: return RegionsResource(self) - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - from .resources.indexing_jobs import IndexingJobsResource - - return IndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource @@ -346,12 +339,6 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - from .resources.indexing_jobs import AsyncIndexingJobsResource - - return AsyncIndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import AsyncKnowledgeBasesResource @@ -515,12 +502,6 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse - - return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse @@ -570,12 +551,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse - - return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse @@ -625,12 +600,6 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse - - return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse @@ -680,12 +649,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse - - return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import 
AsyncKnowledgeBasesResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 1763a13e..6ad0aa32 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -48,14 +48,6 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -84,12 +76,6 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py index 03d143e2..80d04328 100644 --- a/src/gradientai/resources/knowledge_bases/__init__.py +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -8,6 +8,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -24,6 +32,12 @@ "AsyncDataSourcesResourceWithRawResponse", "DataSourcesResourceWithStreamingResponse", "AsyncDataSourcesResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 95% rename from src/gradientai/resources/indexing_jobs.py rename to src/gradientai/resources/knowledge_bases/indexing_jobs.py index 71c59023..39151e41 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/knowledge_bases/indexing_jobs.py @@ -6,23 +6,27 @@ import httpx -from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, 
async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.indexing_job_list_response import IndexingJobListResponse -from ..types.indexing_job_create_response import IndexingJobCreateResponse -from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( + indexing_job_list_params, + indexing_job_create_params, + indexing_job_update_cancel_params, +) +from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse +from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse +from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 2cab4f7b..28acdd7f 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -25,6 +25,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from ...types.knowledge_base_list_response import KnowledgeBaseListResponse from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse @@ -40,6 +48,10 @@ class KnowledgeBasesResource(SyncAPIResource): def data_sources(self) -> DataSourcesResource: return DataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + return IndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: """ @@ -316,6 +328,10 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource): def data_sources(self) -> AsyncDataSourcesResource: return AsyncDataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + return AsyncIndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: """ @@ -611,6 +627,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithRawResponse: return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse: + return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -636,6 +656,10 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: def data_sources(self) -> 
AsyncDataSourcesResourceWithRawResponse: return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse: + return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class KnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -661,6 +685,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithStreamingResponse: return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: + return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -685,3 +713,7 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index d09aaa2a..89a5ec4a 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -7,7 +7,6 @@ from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel -from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams @@ -26,28 +25,18 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as 
IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .region_list_evaluation_metrics_response import ( RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, ) -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 5b4b6e2c..2b0676f0 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -4,7 +4,7 @@ from datetime import datetime from .._models import BaseModel -from .api_indexing_job import APIIndexingJob +from .knowledge_bases.api_indexing_job import APIIndexingJob __all__ = ["APIKnowledgeBase"] diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index 859c3618..9fc915e5 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -2,16 +2,27 @@ from __future__ import annotations +from .api_indexing_job import APIIndexingJob as APIIndexingJob from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams from .data_source_list_response import DataSourceListResponse as DataSourceListResponse +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams 
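Taken together with the re-exports added in this hunk, the indexing-job types resolve from gradientai.types.knowledge_bases once the rename lands, and the resource itself is reached through the knowledge_bases attribute. A minimal usage sketch of the nested path (the client construction and UUID values are assumptions for illustration, not part of the diff; the method signature matches the test updates later in this patch):

    from gradientai import GradientAI
    from gradientai.types.knowledge_bases import IndexingJobCreateResponse

    client = GradientAI()  # assumes credentials are resolved from the environment

    # After this patch the resource hangs off knowledge_bases, not the client root.
    job = client.knowledge_bases.indexing_jobs.create(
        data_source_uuids=["example-data-source-uuid"],  # hypothetical UUID
        knowledge_base_uuid="example-kb-uuid",           # hypothetical UUID
    )
    assert isinstance(job, IndexingJobCreateResponse)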
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .indexing_job_retrieve_data_sources_response import ( + IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) diff --git a/src/gradientai/types/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py similarity index 96% rename from src/gradientai/types/api_indexing_job.py rename to src/gradientai/types/knowledge_bases/api_indexing_job.py index f24aac94..2809141c 100644 --- a/src/gradientai/types/api_indexing_job.py +++ b/src/gradientai/types/knowledge_bases/api_indexing_job.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIIndexingJob"] diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index 57080aaa..ca24d6f0 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -4,7 +4,7 @@ from datetime import datetime from ..._models import BaseModel -from ..api_indexing_job import APIIndexingJob +from .api_indexing_job import APIIndexingJob from .api_spaces_data_source import APISpacesDataSource from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/indexing_job_create_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/gradientai/types/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 89% rename from src/gradientai/types/indexing_job_create_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_response.py index 839bc83b..835ec60d 100644 --- a/src/gradientai/types/indexing_job_create_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobCreateResponse"] diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/indexing_job_list_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 77% rename from src/gradientai/types/indexing_job_list_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_response.py index 1379cc55..4784c1a1 100644 --- a/src/gradientai/types/indexing_job_list_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -2,10 +2,10 @@ from typing import List, Optional -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from ..._models import BaseModel +from ..agents.api_meta import APIMeta from .api_indexing_job import APIIndexingJob +from ..agents.api_links 
import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 97% rename from src/gradientai/types/indexing_job_retrieve_data_sources_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py index b178b984..a9d0c2c0 100644 --- a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] diff --git a/src/gradientai/types/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 89% rename from src/gradientai/types/indexing_job_retrieve_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 95f33d7a..6034bdf1 100644 --- a/src/gradientai/types/indexing_job_retrieve_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobRetrieveResponse"] diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 91% rename from src/gradientai/types/indexing_job_update_cancel_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py index 4c2848b0..9359a42a 100644 --- a/src/gradientai/types/indexing_job_update_cancel_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from .._utils import PropertyInfo +from ..._utils import PropertyInfo __all__ = ["IndexingJobUpdateCancelParams"] diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 90% rename from src/gradientai/types/indexing_job_update_cancel_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index d50e1865..ae4b394f 100644 --- a/src/gradientai/types/indexing_job_update_cancel_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobUpdateCancelResponse"] diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py similarity index 80% rename from tests/api_resources/test_indexing_jobs.py rename to tests/api_resources/knowledge_bases/test_indexing_jobs.py index 6a50d9b5..8bf1829f 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from gradientai.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, 
IndexingJobRetrieveResponse, @@ -26,13 +26,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create() + indexing_job = client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create( + indexing_job = client.knowledge_bases.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.create() + response = client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.create() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve( + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list() + indexing_job = client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list( + indexing_job = client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, ) @@ -120,7 +120,7 @@ def 
test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.list() + response = client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.list() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve_data_sources(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve_data_sources( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve_data_sources( + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N @parametrize def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve_data_sources( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.update_cancel( + 
response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.update_cancel( + with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_path_params_update_cancel(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.indexing_jobs.with_raw_response.update_cancel( + client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) @@ -241,13 +241,13 @@ class TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create() + indexing_job = await async_client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create( + indexing_job = await async_client.knowledge_bases.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.create() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.create() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -298,7 +298,7 @@ async def test_raw_response_retrieve(self, async_client: 
AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list() + indexing_job = await async_client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list( + indexing_job = await async_client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, ) @@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.list() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.list() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve_data_sources( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - 
async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @parametrize async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.update_cancel( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.update_cancel( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien @parametrize async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.update_cancel( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) From 780bdc5529c77c63e54bd357829b64c5d536461d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:33:11 +0000 Subject: [PATCH 059/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index b756ab92..af151d02 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 
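The async tests updated in the patch above assert the mirrored call path on AsyncGradientAI. Condensed into a runnable sketch (the event-loop wiring and auth are assumptions; the list parameters are taken from the test diff):

    import asyncio

    from gradientai import AsyncGradientAI

    async def main() -> None:
        client = AsyncGradientAI()  # assumes credentials are resolved from the environment
        # Same nested path as the sync client, awaited directly.
        jobs = await client.knowledge_bases.indexing_jobs.list(page=0, per_page=0)
        print(jobs)

    asyncio.run(main())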
-config_hash: 4d0e9e07e7ac5a666632cffb655d028c +config_hash: c53f9cc8c1576a747f2e766faafbbc06 From 512fcbf7a046c27b56b1d6e22047e65b1eebf321 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:50:10 +0000 Subject: [PATCH 060/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 46 +++++----- src/gradientai/_client.py | 39 ++++++++- src/gradientai/resources/__init__.py | 14 +++ .../{knowledge_bases => }/indexing_jobs.py | 28 +++--- .../resources/knowledge_bases/__init__.py | 14 --- .../knowledge_bases/knowledge_bases.py | 32 ------- src/gradientai/types/__init__.py | 11 +++ .../{knowledge_bases => }/api_indexing_job.py | 2 +- src/gradientai/types/api_knowledge_base.py | 2 +- .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 6 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 +- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../types/knowledge_bases/__init__.py | 11 --- .../api_knowledge_base_data_source.py | 2 +- .../test_indexing_jobs.py | 86 +++++++++---------- 21 files changed, 153 insertions(+), 152 deletions(-) rename src/gradientai/resources/{knowledge_bases => }/indexing_jobs.py (95%) rename src/gradientai/types/{knowledge_bases => }/api_indexing_job.py (96%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_create_params.py (100%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_create_response.py (89%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_list_params.py (100%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_list_response.py (77%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_retrieve_data_sources_response.py (97%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_retrieve_response.py (89%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_update_cancel_params.py (91%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_update_cancel_response.py (90%) rename tests/api_resources/{knowledge_bases => }/test_indexing_jobs.py (80%) diff --git a/.stats.yml b/.stats.yml index af151d02..f0863f5f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: c53f9cc8c1576a747f2e766faafbbc06 +config_hash: 211ece2994c6ac52f84f78ee56c1097a diff --git a/api.md b/api.md index d14ceec2..970f6951 100644 --- a/api.md +++ b/api.md @@ -260,6 +260,29 @@ Methods: - client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse - client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +# IndexingJobs + +Types: + +```python +from gradientai.types import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- 
client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # KnowledgeBases Types: @@ -306,29 +329,6 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse -## IndexingJobs - -Types: - -```python -from gradientai.types.knowledge_bases import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # Chat ## Completions diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 00025498..71db35bc 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,10 +31,11 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource @@ -126,6 +127,12 @@ def regions(self) -> RegionsResource: return RegionsResource(self) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + from .resources.indexing_jobs import IndexingJobsResource + + return IndexingJobsResource(self) + @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource @@ -339,6 +346,12 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + from .resources.indexing_jobs import AsyncIndexingJobsResource + + return AsyncIndexingJobsResource(self) + @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import AsyncKnowledgeBasesResource @@ -502,6 +515,12 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse + + return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> 
knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse @@ -551,6 +570,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse + + return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse @@ -600,6 +625,12 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse + + return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse @@ -649,6 +680,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse + + return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 6ad0aa32..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -48,6 +48,14 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -76,6 +84,12 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py similarity index 95% rename from src/gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/gradientai/resources/indexing_jobs.py index 39151e41..71c59023 100644 --- a/src/gradientai/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -6,27 
+6,23 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.knowledge_bases import ( - indexing_job_list_params, - indexing_job_create_params, - indexing_job_update_cancel_params, -) -from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse -from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse -from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse +from .._base_client import make_request_options +from ..types.indexing_job_list_response import IndexingJobListResponse +from ..types.indexing_job_create_response import IndexingJobCreateResponse +from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py index 80d04328..03d143e2 100644 --- a/src/gradientai/resources/knowledge_bases/__init__.py +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -8,14 +8,6 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -32,12 +24,6 @@ "AsyncDataSourcesResourceWithRawResponse", "DataSourcesResourceWithStreamingResponse", "AsyncDataSourcesResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 28acdd7f..2cab4f7b 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -25,14 +25,6 @@ 
    DataSourcesResourceWithStreamingResponse,
    AsyncDataSourcesResourceWithStreamingResponse,
)
-from .indexing_jobs import (
-    IndexingJobsResource,
-    AsyncIndexingJobsResource,
-    IndexingJobsResourceWithRawResponse,
-    AsyncIndexingJobsResourceWithRawResponse,
-    IndexingJobsResourceWithStreamingResponse,
-    AsyncIndexingJobsResourceWithStreamingResponse,
-)
 from ..._base_client import make_request_options
 from ...types.knowledge_base_list_response import KnowledgeBaseListResponse
 from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse
@@ -48,10 +40,6 @@ class KnowledgeBasesResource(SyncAPIResource):
     def data_sources(self) -> DataSourcesResource:
         return DataSourcesResource(self._client)
 
-    @cached_property
-    def indexing_jobs(self) -> IndexingJobsResource:
-        return IndexingJobsResource(self._client)
-
     @cached_property
     def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse:
         """
@@ -328,10 +316,6 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource):
     def data_sources(self) -> AsyncDataSourcesResource:
         return AsyncDataSourcesResource(self._client)
 
-    @cached_property
-    def indexing_jobs(self) -> AsyncIndexingJobsResource:
-        return AsyncIndexingJobsResource(self._client)
-
     @cached_property
     def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
         """
@@ -627,10 +611,6 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
     def data_sources(self) -> DataSourcesResourceWithRawResponse:
         return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
 
-    @cached_property
-    def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse:
-        return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
-
 
 class AsyncKnowledgeBasesResourceWithRawResponse:
     def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
@@ -656,10 +636,6 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
     def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse:
         return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
 
-    @cached_property
-    def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse:
-        return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
-
 
 class KnowledgeBasesResourceWithStreamingResponse:
     def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
@@ -685,10 +661,6 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
     def data_sources(self) -> DataSourcesResourceWithStreamingResponse:
         return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
 
-    @cached_property
-    def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse:
-        return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
-
 
 class AsyncKnowledgeBasesResourceWithStreamingResponse:
     def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
@@ -713,7 +685,3 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
     @cached_property
     def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse:
         return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
-
-    @cached_property
-    def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
-        return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 89a5ec4a..d09aaa2a 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -7,6 +7,7 @@
 from .api_agreement import APIAgreement as APIAgreement
 from .api_workspace import APIWorkspace as APIWorkspace
 from .api_agent_model import APIAgentModel as APIAgentModel
+from .api_indexing_job import APIIndexingJob as APIIndexingJob
 from .agent_list_params import AgentListParams as AgentListParams
 from .api_model_version import APIModelVersion as APIModelVersion
 from .model_list_params import ModelListParams as ModelListParams
@@ -25,18 +26,28 @@
 from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
 from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
 from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
+from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
 from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
 from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams
 from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
+from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
+from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
 from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
 from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse
+from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
 from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
 from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
 from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
+from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
 from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
 from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse
 from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse
 from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse
+from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
 from .region_list_evaluation_metrics_response import (
     RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse,
 )
+from .indexing_job_retrieve_data_sources_response import (
+    IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
+)
diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/api_indexing_job.py
similarity index 96%
rename from src/gradientai/types/knowledge_bases/api_indexing_job.py
rename to src/gradientai/types/api_indexing_job.py
index 2809141c..f24aac94 100644
--- a/src/gradientai/types/knowledge_bases/api_indexing_job.py
+++ b/src/gradientai/types/api_indexing_job.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["APIIndexingJob"]
diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py
index 2b0676f0..5b4b6e2c 100644
--- a/src/gradientai/types/api_knowledge_base.py
+++ b/src/gradientai/types/api_knowledge_base.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 
 from .._models import BaseModel
-from .knowledge_bases.api_indexing_job import APIIndexingJob
+from .api_indexing_job import APIIndexingJob
 
 __all__ = ["APIKnowledgeBase"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/indexing_job_create_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/indexing_job_create_params.py
rename to src/gradientai/types/indexing_job_create_params.py
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/indexing_job_create_response.py
similarity index 89%
rename from src/gradientai/types/knowledge_bases/indexing_job_create_response.py
rename to src/gradientai/types/indexing_job_create_response.py
index 835ec60d..839bc83b 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py
+++ b/src/gradientai/types/indexing_job_create_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobCreateResponse"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/indexing_job_list_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/indexing_job_list_params.py
rename to src/gradientai/types/indexing_job_list_params.py
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py
similarity index 77%
rename from src/gradientai/types/knowledge_bases/indexing_job_list_response.py
rename to src/gradientai/types/indexing_job_list_response.py
index 4784c1a1..1379cc55 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py
+++ b/src/gradientai/types/indexing_job_list_response.py
@@ -2,10 +2,10 @@
 
 from typing import List, Optional
 
-from ..._models import BaseModel
-from ..agents.api_meta import APIMeta
+from .._models import BaseModel
+from .agents.api_meta import APIMeta
+from .agents.api_links import APILinks
 from .api_indexing_job import APIIndexingJob
-from ..agents.api_links import APILinks
 
 __all__ = ["IndexingJobListResponse"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py
similarity index 97%
rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
rename to src/gradientai/types/indexing_job_retrieve_data_sources_response.py
index a9d0c2c0..b178b984 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
+++ b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from typing_extensions import Literal
 
-from ..._models import BaseModel
+from .._models import BaseModel
 
 __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/indexing_job_retrieve_response.py
similarity index 89%
rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
rename to src/gradientai/types/indexing_job_retrieve_response.py
index 6034bdf1..95f33d7a 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
+++ b/src/gradientai/types/indexing_job_retrieve_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobRetrieveResponse"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradientai/types/indexing_job_update_cancel_params.py
similarity index 91%
rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
rename to src/gradientai/types/indexing_job_update_cancel_params.py
index 9359a42a..4c2848b0 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
+++ b/src/gradientai/types/indexing_job_update_cancel_params.py
@@ -4,7 +4,7 @@
 
 from typing_extensions import Annotated, TypedDict
 
-from ..._utils import PropertyInfo
+from .._utils import PropertyInfo
 
 __all__ = ["IndexingJobUpdateCancelParams"]
diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/indexing_job_update_cancel_response.py
similarity index 90%
rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
rename to src/gradientai/types/indexing_job_update_cancel_response.py
index ae4b394f..d50e1865 100644
--- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
+++ b/src/gradientai/types/indexing_job_update_cancel_response.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from ..._models import BaseModel
+from .._models import BaseModel
 from .api_indexing_job import APIIndexingJob
 
 __all__ = ["IndexingJobUpdateCancelResponse"]
diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py
index 9fc915e5..859c3618 100644
--- a/src/gradientai/types/knowledge_bases/__init__.py
+++ b/src/gradientai/types/knowledge_bases/__init__.py
@@ -2,27 +2,16 @@
 
 from __future__ import annotations
 
-from .api_indexing_job import APIIndexingJob as APIIndexingJob
 from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam
 from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource
 from .data_source_list_params import DataSourceListParams as DataSourceListParams
-from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
 from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams
 from .data_source_list_response import DataSourceListResponse as DataSourceListResponse
-from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
-from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
 from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource
 from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource
 from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse
 from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse
 from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam
-from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
 from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource
-from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
 from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam
 from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
-from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
-from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
-from .indexing_job_retrieve_data_sources_response import (
-    IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
-)
diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
index ca24d6f0..57080aaa 100644
--- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 
 from ..._models import BaseModel
-from .api_indexing_job import APIIndexingJob
+from ..api_indexing_job import APIIndexingJob
 from .api_spaces_data_source import APISpacesDataSource
 from .api_file_upload_data_source import APIFileUploadDataSource
 from .api_web_crawler_data_source import APIWebCrawlerDataSource
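The net effect of the type moves above is that the indexing-job models become importable from the top-level `gradientai.types` package instead of `gradientai.types.knowledge_bases`. A minimal sketch of the new import locations, taken directly from the re-exports added to `src/gradientai/types/__init__.py` above:

```python
# New top-level locations for the indexing-job types; previously these
# names lived under `gradientai.types.knowledge_bases`.
from gradientai.types import (
    APIIndexingJob,
    IndexingJobCreateResponse,
    IndexingJobListResponse,
)
```

The test updates that follow exercise the same relocation on the client surface.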
diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py
similarity index 80%
rename from tests/api_resources/knowledge_bases/test_indexing_jobs.py
rename to tests/api_resources/test_indexing_jobs.py
index 8bf1829f..6a50d9b5 100644
--- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py
+++ b/tests/api_resources/test_indexing_jobs.py
@@ -9,7 +9,7 @@
 from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.knowledge_bases import (
+from gradientai.types import (
     IndexingJobListResponse,
     IndexingJobCreateResponse,
     IndexingJobRetrieveResponse,
@@ -26,13 +26,13 @@ class TestIndexingJobs:
     @pytest.mark.skip()
     @parametrize
     def test_method_create(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.create()
+        indexing_job = client.indexing_jobs.create()
         assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_create_with_all_params(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.create(
+        indexing_job = client.indexing_jobs.create(
             data_source_uuids=["string"],
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_create(self, client: GradientAI) -> None:
-        response = client.knowledge_bases.indexing_jobs.with_raw_response.create()
+        response = client.indexing_jobs.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_create(self, client: GradientAI) -> None:
-        with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
+        with client.indexing_jobs.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.retrieve(
+        indexing_job = client.indexing_jobs.retrieve(
             "uuid",
         )
         assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
@@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_retrieve(self, client: GradientAI) -> None:
-        response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+        response = client.indexing_jobs.with_raw_response.retrieve(
             "uuid",
         )
 
@@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_retrieve(self, client: GradientAI) -> None:
-        with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
+        with client.indexing_jobs.with_streaming_response.retrieve(
             "uuid",
         ) as response:
             assert not response.is_closed
@@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_retrieve(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+            client.indexing_jobs.with_raw_response.retrieve(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.list()
+        indexing_job = client.indexing_jobs.list()
         assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     def test_method_list_with_all_params(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.list(
+        indexing_job = client.indexing_jobs.list(
             page=0,
             per_page=0,
         )
@@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_list(self, client: GradientAI) -> None:
-        response = client.knowledge_bases.indexing_jobs.with_raw_response.list()
+        response = client.indexing_jobs.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_list(self, client: GradientAI) -> None:
-        with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
+        with client.indexing_jobs.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve_data_sources(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources(
+        indexing_job = client.indexing_jobs.retrieve_data_sources(
             "indexing_job_uuid",
         )
         assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
@@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None:
-        response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+        response = client.indexing_jobs.with_raw_response.retrieve_data_sources(
             "indexing_job_uuid",
         )
 
@@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None:
-        with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
+        with client.indexing_jobs.with_streaming_response.retrieve_data_sources(
             "indexing_job_uuid",
         ) as response:
             assert not response.is_closed
@@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N
     @parametrize
     def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
-            client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+            client.indexing_jobs.with_raw_response.retrieve_data_sources(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_update_cancel(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
+        indexing_job = client.indexing_jobs.update_cancel(
             path_uuid="uuid",
         )
         assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
@@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None:
-        indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
+        indexing_job = client.indexing_jobs.update_cancel(
             path_uuid="uuid",
             body_uuid="uuid",
         )
@@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_raw_response_update_cancel(self, client: GradientAI) -> None:
-        response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+        response = client.indexing_jobs.with_raw_response.update_cancel(
             path_uuid="uuid",
         )
 
@@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     def test_streaming_response_update_cancel(self, client: GradientAI) -> None:
-        with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
+        with client.indexing_jobs.with_streaming_response.update_cancel(
             path_uuid="uuid",
         ) as response:
             assert not response.is_closed
@@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None:
     @parametrize
     def test_path_params_update_cancel(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+            client.indexing_jobs.with_raw_response.update_cancel(
                 path_uuid="",
            )
 
@@ -241,13 +241,13 @@ class TestAsyncIndexingJobs:
     @pytest.mark.skip()
     @parametrize
     async def test_method_create(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.create()
+        indexing_job = await async_client.indexing_jobs.create()
         assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.create(
+        indexing_job = await async_client.indexing_jobs.create(
             data_source_uuids=["string"],
             knowledge_base_uuid="knowledge_base_uuid",
         )
@@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create()
+        response = await async_client.indexing_jobs.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
+        async with async_client.indexing_jobs.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
     @pytest.mark.skip()
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve(
+        indexing_job = await async_client.indexing_jobs.retrieve(
             "uuid",
         )
         assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
@@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+        response = await async_client.indexing_jobs.with_raw_response.retrieve(
             "uuid",
         )
 
@@ -298,7 +298,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
+        async with async_client.indexing_jobs.with_streaming_response.retrieve(
             "uuid",
         ) as response:
             assert not response.is_closed
@@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
-            await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
+            await async_client.indexing_jobs.with_raw_response.retrieve(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.list()
+        indexing_job = await async_client.indexing_jobs.list()
         assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.list(
+        indexing_job = await async_client.indexing_jobs.list(
             page=0,
             per_page=0,
         )
@@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list()
+        response = await async_client.indexing_jobs.with_raw_response.list()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
+        async with async_client.indexing_jobs.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
     @pytest.mark.skip()
     @parametrize
     async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources(
+        indexing_job = await async_client.indexing_jobs.retrieve_data_sources(
             "indexing_job_uuid",
         )
         assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
@@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI)
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+        response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
             "indexing_job_uuid",
         )
 
@@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
+        async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources(
             "indexing_job_uuid",
         ) as response:
             assert not response.is_closed
@@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn
     @parametrize
     async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
-            await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
+            await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
                 "",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
+        indexing_job = await async_client.indexing_jobs.update_cancel(
             path_uuid="uuid",
         )
         assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
@@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None
     @pytest.mark.skip()
     @parametrize
     async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None:
-        indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
+        indexing_job = await async_client.indexing_jobs.update_cancel(
             path_uuid="uuid",
             body_uuid="uuid",
         )
@@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra
     @pytest.mark.skip()
     @parametrize
     async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+        response = await async_client.indexing_jobs.with_raw_response.update_cancel(
             path_uuid="uuid",
         )
 
@@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -
     @pytest.mark.skip()
     @parametrize
     async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
+        async with async_client.indexing_jobs.with_streaming_response.update_cancel(
             path_uuid="uuid",
         ) as response:
             assert not response.is_closed
@@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien
     @parametrize
     async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
-            await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
+            await async_client.indexing_jobs.with_raw_response.update_cancel(
                 path_uuid="",
             )
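The commit above promotes indexing jobs from a sub-resource of knowledge bases to a top-level client resource. A minimal sketch of the relocated call sites, mirroring the updated tests (the argument values are the same placeholder strings the tests use):

```python
# Sketch of the new top-level indexing-jobs surface; assumes a client
# configured via the GRADIENTAI_API_KEY environment variable.
from gradientai import GradientAI

client = GradientAI()

# Previously: client.knowledge_bases.indexing_jobs.create(...)
indexing_job = client.indexing_jobs.create(
    data_source_uuids=["string"],
    knowledge_base_uuid="knowledge_base_uuid",
)
jobs = client.indexing_jobs.list(page=0, per_page=0)
cancelled = client.indexing_jobs.update_cancel(path_uuid="uuid")
```

The `with_raw_response` and `with_streaming_response` variants move the same way, as the test diff shows.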
From ad19b48d402877bef1beac968c497fcd7f450197 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 01:54:11 +0000
Subject: [PATCH 061/200] chore(internal): version bump

---
 .release-please-manifest.json | 2 +-
 pyproject.toml                | 2 +-
 src/gradientai/_version.py    | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index aaf968a1..b56c3d0b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.3"
+  ".": "0.1.0-alpha.4"
 }
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 1c89346a..0dd5228b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python"
-version = "0.1.0-alpha.3"
+version = "0.1.0-alpha.4"
 description = "The official Python library for the GradientAI API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py
index 2cf47e97..4d3df522 100644
--- a/src/gradientai/_version.py
+++ b/src/gradientai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "gradientai"
-__version__ = "0.1.0-alpha.3"  # x-release-please-version
+__version__ = "0.1.0-alpha.4"  # x-release-please-version
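The next patch renames the import package from `gradientai` to `do_gradientai`, as its README and CONTRIBUTING changes show. For user code the migration is limited to import paths; a minimal sketch, assuming the renamed package is installed:

```python
# Only the import package changes; the client API is unchanged.
# Before: from gradientai import GradientAI
from do_gradientai import GradientAI

client = GradientAI()  # still configured via GRADIENTAI_API_KEY by default
```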
__title__ = "gradientai" -__version__ = "0.1.0-alpha.3" # x-release-please-version +__version__ = "0.1.0-alpha.4" # x-release-please-version From d4317a3128d96756088b828abb6f904f3064de45 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:57:56 +0000 Subject: [PATCH 062/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 44 ++--- api.md | 180 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{gradientai => do_gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{gradientai => do_gradientai}/_client.py | 0 src/{gradientai => do_gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{gradientai => do_gradientai}/_files.py | 0 src/{gradientai => do_gradientai}/_models.py | 0 src/{gradientai => do_gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{gradientai => do_gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{gradientai => do_gradientai}/_version.py | 2 +- src/do_gradientai/lib/.keep | 4 + src/{gradientai => do_gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/child_agents.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/indexing_jobs.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/inference/models.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/models.py | 0 .../resources/providers/__init__.py | 0 .../resources/providers/anthropic/__init__.py | 0 .../providers/anthropic/anthropic.py | 0 .../resources/providers/anthropic/keys.py | 0 .../resources/providers/openai/__init__.py | 0 .../resources/providers/openai/keys.py | 0 .../resources/providers/openai/openai.py | 0 .../resources/providers/providers.py | 0 .../resources/regions/__init__.py | 0 .../resources/regions/evaluation_datasets.py | 0 .../regions/evaluation_runs/__init__.py | 0 .../evaluation_runs/evaluation_runs.py | 0 .../regions/evaluation_runs/results.py | 0 .../regions/evaluation_test_cases.py | 0 .../resources/regions/regions.py | 0 .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 
.../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_links.py | 0 .../types/agents/api_meta.py | 0 .../types/agents/child_agent_add_params.py | 0 .../types/agents/child_agent_add_response.py | 0 .../agents/child_agent_delete_response.py | 0 .../types/agents/child_agent_update_params.py | 0 .../agents/child_agent_update_response.py | 0 .../types/agents/child_agent_view_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_evaluation_metric.py | 0 .../types/api_indexing_job.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../chat/chat_completion_token_logprob.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/indexing_job_create_params.py | 0 .../types/indexing_job_create_response.py | 0 .../types/indexing_job_list_params.py | 0 .../types/indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../types/indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/inference/model.py | 0 .../types/inference/model_list_response.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 
.../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../types/model_list_params.py | 0 .../types/model_list_response.py | 0 .../types/providers/__init__.py | 0 .../types/providers/anthropic/__init__.py | 0 .../providers/anthropic/key_create_params.py | 0 .../anthropic/key_create_response.py | 0 .../anthropic/key_delete_response.py | 0 .../anthropic/key_list_agents_params.py | 0 .../anthropic/key_list_agents_response.py | 0 .../providers/anthropic/key_list_params.py | 0 .../providers/anthropic/key_list_response.py | 0 .../anthropic/key_retrieve_response.py | 0 .../providers/anthropic/key_update_params.py | 0 .../anthropic/key_update_response.py | 0 .../types/providers/openai/__init__.py | 0 .../providers/openai/key_create_params.py | 0 .../providers/openai/key_create_response.py | 0 .../providers/openai/key_delete_response.py | 0 .../types/providers/openai/key_list_params.py | 0 .../providers/openai/key_list_response.py | 0 .../openai/key_retrieve_agents_params.py | 0 .../openai/key_retrieve_agents_response.py | 0 .../providers/openai/key_retrieve_response.py | 0 .../providers/openai/key_update_params.py | 0 .../providers/openai/key_update_response.py | 0 ...region_list_evaluation_metrics_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/regions/__init__.py | 0 .../types/regions/api_evaluation_test_case.py | 0 .../types/regions/api_star_metric.py | 0 .../types/regions/api_star_metric_param.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../regions/evaluation_run_create_params.py | 0 .../regions/evaluation_run_create_response.py | 0 .../evaluation_run_retrieve_response.py | 0 .../types/regions/evaluation_runs/__init__.py | 0 .../api_evaluation_metric_result.py | 0 .../evaluation_runs/api_evaluation_run.py | 0 .../regions/evaluation_runs/api_prompt.py | 0 .../result_retrieve_prompt_response.py | 0 .../result_retrieve_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 tests/api_resources/agents/test_api_keys.py | 4 +- .../api_resources/agents/test_child_agents.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- tests/api_resources/inference/test_models.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../providers/anthropic/test_keys.py | 4 +- .../providers/openai/test_keys.py | 4 +- .../regions/evaluation_runs/test_results.py | 4 +- .../regions/test_evaluation_datasets.py | 4 +- .../regions/test_evaluation_runs.py | 4 +- .../regions/test_evaluation_test_cases.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_indexing_jobs.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 46 ++--- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- 
tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 255 files changed, 234 insertions(+), 230 deletions(-) rename src/{gradientai => do_gradientai}/__init__.py (95%) rename src/{gradientai => do_gradientai}/_base_client.py (99%) rename src/{gradientai => do_gradientai}/_client.py (100%) rename src/{gradientai => do_gradientai}/_compat.py (100%) rename src/{gradientai => do_gradientai}/_constants.py (100%) rename src/{gradientai => do_gradientai}/_exceptions.py (100%) rename src/{gradientai => do_gradientai}/_files.py (100%) rename src/{gradientai => do_gradientai}/_models.py (100%) rename src/{gradientai => do_gradientai}/_qs.py (100%) rename src/{gradientai => do_gradientai}/_resource.py (100%) rename src/{gradientai => do_gradientai}/_response.py (99%) rename src/{gradientai => do_gradientai}/_streaming.py (100%) rename src/{gradientai => do_gradientai}/_types.py (99%) rename src/{gradientai => do_gradientai}/_utils/__init__.py (100%) rename src/{gradientai => do_gradientai}/_utils/_logs.py (75%) rename src/{gradientai => do_gradientai}/_utils/_proxy.py (100%) rename src/{gradientai => do_gradientai}/_utils/_reflection.py (100%) rename src/{gradientai => do_gradientai}/_utils/_resources_proxy.py (50%) rename src/{gradientai => do_gradientai}/_utils/_streams.py (100%) rename src/{gradientai => do_gradientai}/_utils/_sync.py (100%) rename src/{gradientai => do_gradientai}/_utils/_transform.py (100%) rename src/{gradientai => do_gradientai}/_utils/_typing.py (100%) rename src/{gradientai => do_gradientai}/_utils/_utils.py (100%) rename src/{gradientai => do_gradientai}/_version.py (83%) create mode 100644 src/do_gradientai/lib/.keep rename src/{gradientai => do_gradientai}/py.typed (100%) rename src/{gradientai => do_gradientai}/resources/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/child_agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/functions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/versions.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/indexing_jobs.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/inference.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/models.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/models.py (100%) rename 
 rename src/{gradientai => do_gradientai}/resources/providers/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/anthropic/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/anthropic/anthropic.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/anthropic/keys.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/openai/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/openai/keys.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/openai/openai.py (100%)
 rename src/{gradientai => do_gradientai}/resources/providers/providers.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/evaluation_datasets.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/evaluation_runs.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/results.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/evaluation_test_cases.py (100%)
 rename src/{gradientai => do_gradientai}/resources/regions/regions.py (100%)
 rename src/{gradientai => do_gradientai}/types/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_update_status_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agent_update_status_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_regenerate_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_link_knowledge_base_output.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/api_meta.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_add_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/child_agent_view_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/knowledge_base_detach_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent_model.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agreement.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_anthropic_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_deployment_visibility.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_evaluation_metric.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_indexing_job.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_knowledge_base.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_model.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_model_version.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_openai_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_retrieval_method.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_workspace.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/chat_completion_token_logprob.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/completion_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/completion_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_retrieve_data_sources_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_update_cancel_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/indexing_job_update_cancel_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_regenerate_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_model_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/model.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/model_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/aws_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/model_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/model_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_agents_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_agents_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_agents_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_agents_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/providers/openai/key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/region_list_evaluation_metrics_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/region_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/region_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/api_evaluation_test_case.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/api_star_metric.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/api_star_metric_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_evaluation_metric_result.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_evaluation_run.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_prompt.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/result_retrieve_prompt_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/result_retrieve_response.py (100%)
do_gradientai}/types/regions/evaluation_test_case_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_evaluation_runs_response.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_update_response.py (100%) diff --git a/.stats.yml b/.stats.yml index f0863f5f..ed791f90 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 211ece2994c6ac52f84f78ee56c1097a +config_hash: 0c94579072c21854f9e042dfaac74e1d diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 086907ef..4f59c83a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never -modify the contents of the `src/gradientai/lib/` and `examples/` directories. +modify the contents of the `src/do_gradientai/lib/` and `examples/` directories. ## Adding and running examples diff --git a/README.md b/README.md index 09b1e15d..64a2af23 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ The full API of this library can be found in [api.md](api.md). ```python import os -from gradientai import GradientAI +from do_gradientai import GradientAI client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -49,7 +49,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac ```python import os import asyncio -from gradientai import AsyncGradientAI +from do_gradientai import AsyncGradientAI client = AsyncGradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -84,8 +84,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH ```python import os import asyncio -from gradientai import DefaultAioHttpClient -from gradientai import AsyncGradientAI +from do_gradientai import DefaultAioHttpClient +from do_gradientai import AsyncGradientAI async def main() -> None: @@ -116,7 +116,7 @@ Typed requests and responses provide autocomplete and documentation within your Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -from gradientai import GradientAI +from do_gradientai import GradientAI client = GradientAI() @@ -128,16 +128,16 @@ print(evaluation_test_case.star_metric) ## Handling errors -When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised. 
diff --git a/README.md b/README.md
index 09b1e15d..64a2af23 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ The full API of this library can be found in [api.md](api.md).
 
 ```python
 import os
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 client = GradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -49,7 +49,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
 ```python
 import os
 import asyncio
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI
 
 client = AsyncGradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -84,8 +84,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH
 ```python
 import os
 import asyncio
-from gradientai import DefaultAioHttpClient
-from gradientai import AsyncGradientAI
+from do_gradientai import DefaultAioHttpClient
+from do_gradientai import AsyncGradientAI
 
 
 async def main() -> None:
@@ -116,7 +116,7 @@ Typed requests and responses provide autocomplete and documentation within your
 Nested parameters are dictionaries, typed using `TypedDict`, for example:
 
 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 client = GradientAI()
@@ -128,16 +128,16 @@ print(evaluation_test_case.star_metric)
 
 ## Handling errors
 
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
 
 When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
 
-All errors inherit from `gradientai.APIError`.
+All errors inherit from `do_gradientai.APIError`.
 
 ```python
-import gradientai
-from gradientai import GradientAI
+import do_gradientai
+from do_gradientai import GradientAI
 
 client = GradientAI()
@@ -145,12 +145,12 @@ try:
     client.agents.versions.list(
         uuid="REPLACE_ME",
     )
-except gradientai.APIConnectionError as e:
+except do_gradientai.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except gradientai.RateLimitError as e:
+except do_gradientai.RateLimitError as e:
     print("A 429 status code was received; we should back off a bit.")
-except gradientai.APIStatusError as e:
+except do_gradientai.APIStatusError as e:
     print("Another non-200-range status code was received")
     print(e.status_code)
     print(e.response)
@@ -178,7 +178,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
 You can use the `max_retries` option to configure or disable retry settings:
 
 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 # Configure the default for all requests:
 client = GradientAI(
@@ -198,7 +198,7 @@ By default requests time out after 1 minute. You can configure this with a `time
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
 
 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 # Configure the default for all requests:
 client = GradientAI(
@@ -252,7 +252,7 @@ if response.my_field is None:
 The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
 
 ```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 client = GradientAI()
 response = client.agents.versions.with_raw_response.list(
@@ -264,9 +264,9 @@ version = response.parse()  # get the object that `agents.versions.list()` would
 print(version.agent_versions)
 ```
 
-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
 
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
 
 #### `.with_streaming_response`
@@ -330,7 +330,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
 
 ```python
 import httpx
-from gradientai import GradientAI, DefaultHttpxClient
+from do_gradientai import GradientAI, DefaultHttpxClient
 
 client = GradientAI(
     # Or use the `GRADIENT_AI_BASE_URL` env var
@@ -353,7 +353,7 @@ client.with_options(http_client=DefaultHttpxClient(...))
 
 By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
 
 ```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
 
 with GradientAI() as client:
     # make requests here
@@ -381,8 +381,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
 You can determine the version that is being used at runtime with:
 
 ```py
-import gradientai
-print(gradientai.__version__)
+import do_gradientai
+print(do_gradientai.__version__)
 ```
 
 ## Requirements
diff --git a/api.md b/api.md
index 970f6951..a10c03ef 100644
--- a/api.md
+++ b/api.md
@@ -3,7 +3,7 @@
 Types:
 
 ```python
-from gradientai.types import (
+from do_gradientai.types import (
     APIAgent,
     APIAgentAPIKeyInfo,
     APIAgentModel,
@@ -23,19 +23,19 @@ from gradientai.types import (
 
 Methods:
 
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
 
 ## APIKeys
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
     APIKeyListResponse,
@@ -46,18 +46,18 @@ from gradientai.types.agents import (
 
 Methods:
 
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
 
 ## Functions
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionUpdateResponse,
     FunctionDeleteResponse,
@@ -66,43 +66,43 @@ from gradientai.types.agents import (
 
 Methods:
 
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
 
 ## Versions
 
 Types:
 
 ```python
-from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
+from do_gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
 ```
 
 Methods:
 
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
 
 ## KnowledgeBases
 
 Types:
 
 ```python
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 ```
 
 Methods:
 
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
 
 ## ChildAgents
 
 Types:
 
 ```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
     ChildAgentUpdateResponse,
     ChildAgentDeleteResponse,
     ChildAgentAddResponse,
@@ -112,10 +112,10 @@ from gradientai.types.agents import (
 
 Methods:
 
-- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
-- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
-- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
-- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
+- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
+- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
+- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
+- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
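As a usage sketch for the agents surface documented above (illustrative only; the uuid is a placeholder, in the same style as the README examples):

```python
from do_gradientai import GradientAI

client = GradientAI()

# List agents, then fetch one agent's version history.
agents = client.agents.list()
versions = client.agents.versions.list(uuid="REPLACE_ME")  # placeholder uuid
print(versions.agent_versions)
```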
 
 # Providers
 
@@ -126,7 +126,7 @@ Methods:
 Types:
 
 ```python
-from gradientai.types.providers.anthropic import (
+from do_gradientai.types.providers.anthropic import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -138,12 +138,12 @@ from gradientai.types.providers.anthropic import (
 
 Methods:
 
-- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
 
 ## OpenAI
 
 ### Keys
 
 Types:
 
 ```python
-from gradientai.types.providers.openai import (
+from do_gradientai.types.providers.openai import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -164,19 +164,19 @@ from gradientai.types.providers.openai import (
 
 Methods:
 
-- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
-- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
+- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
+- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
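A hedged sketch of the provider key management methods listed above, using only the documented signatures (the uuid is a placeholder):

```python
from do_gradientai import GradientAI

client = GradientAI()

# Enumerate stored Anthropic provider keys, then look one up by its uuid.
keys = client.providers.anthropic.keys.list()
key = client.providers.anthropic.keys.retrieve(api_key_uuid="REPLACE_ME")
```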
 
 # Regions
 
 Types:
 
 ```python
-from gradientai.types import (
+from do_gradientai.types import (
     APIEvaluationMetric,
     RegionListResponse,
     RegionListEvaluationMetricsResponse,
@@ -185,28 +185,28 @@ from gradientai.types import (
 
 Methods:
 
-- client.regions.list(\*\*params) -> RegionListResponse
-- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse
+- client.regions.list(\*\*params) -> RegionListResponse
+- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse
 
 ## EvaluationRuns
 
 Types:
 
 ```python
-from gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse
+from do_gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse
 ```
 
 Methods:
 
-- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
 
 ### Results
 
 Types:
 
 ```python
-from gradientai.types.regions.evaluation_runs import (
+from do_gradientai.types.regions.evaluation_runs import (
     APIEvaluationMetricResult,
     APIEvaluationRun,
     APIPrompt,
@@ -217,15 +217,15 @@ from gradientai.types.regions.evaluation_runs import (
 
 Methods:
 
-- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse
-- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse
+- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse
+- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse
 
 ## EvaluationTestCases
 
 Types:
 
 ```python
-from gradientai.types.regions import (
+from do_gradientai.types.regions import (
     APIEvaluationTestCase,
     APIStarMetric,
     EvaluationTestCaseCreateResponse,
@@ -238,18 +238,18 @@ from gradientai.types.regions import (
 
 Methods:
 
-- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
-- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
+- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
 
 ## EvaluationDatasets
 
 Types:
 
 ```python
-from gradientai.types.regions import (
+from do_gradientai.types.regions import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
@@ -257,15 +257,15 @@ from gradientai.types.regions import (
 
 Methods:
 
-- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
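For the evaluation endpoints above, a minimal illustrative flow using only the documented signatures (uuids are placeholders):

```python
from do_gradientai import GradientAI

client = GradientAI()

# Fetch the defined test cases, then inspect one run and its per-run results.
test_cases = client.regions.evaluation_test_cases.list()
run = client.regions.evaluation_runs.retrieve(evaluation_run_uuid="REPLACE_ME")
results = client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid="REPLACE_ME")
```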
 
 # IndexingJobs
 
 Types:
 
 ```python
-from gradientai.types import (
+from do_gradientai.types import (
     APIIndexingJob,
     IndexingJobCreateResponse,
     IndexingJobRetrieveResponse,
@@ -277,18 +277,18 @@ from gradientai.types import (
 
 Methods:
 
-- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
-- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
-- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
-- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
-- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
 
 # KnowledgeBases
 
 Types:
 
 ```python
-from gradientai.types import (
+from do_gradientai.types import (
     APIKnowledgeBase,
     KnowledgeBaseCreateResponse,
     KnowledgeBaseRetrieveResponse,
@@ -300,18 +300,18 @@ from gradientai.types import (
 
 Methods:
 
-- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
-- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
-- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
-- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
-- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
+- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
+- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
+- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
+- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
 
 ## DataSources
 
 Types:
 
 ```python
-from gradientai.types.knowledge_bases import (
+from do_gradientai.types.knowledge_bases import (
     APIFileUploadDataSource,
     APIKnowledgeBaseDataSource,
     APISpacesDataSource,
@@ -325,9 +325,9 @@ from gradientai.types.knowledge_bases import (
 
 Methods:
 
-- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
-- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
-- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
+- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
+- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
 
 # Chat
@@ -336,12 +336,12 @@ Methods:
 
 Types:
 
 ```python
-from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
+from do_gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
 ```
 
 Methods:
 
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
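The chat surface mirrors the familiar completions shape. A sketch of the single documented method (the model id and message structure are assumptions for illustration, not taken from this patch):

```python
from do_gradientai import GradientAI

client = GradientAI()

completion = client.chat.completions.create(
    model="REPLACE_ME",  # placeholder model id
    messages=[{"role": "user", "content": "Say hello."}],  # assumed message shape
)
print(completion)
```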
 
 # Inference
@@ -350,7 +350,7 @@ Methods:
 
 Types:
 
 ```python
-from gradientai.types.inference import (
+from do_gradientai.types.inference import (
     APIModelAPIKeyInfo,
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
@@ -362,33 +362,33 @@ from gradientai.types.inference import (
 
 Methods:
 
-- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
-- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
-- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
-- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
+- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
+- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
+- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
+- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
 
 ## Models
 
 Types:
 
 ```python
-from gradientai.types.inference import Model, ModelListResponse
+from do_gradientai.types.inference import Model, ModelListResponse
 ```
 
 Methods:
 
-- client.inference.models.retrieve(model) -> Model
-- client.inference.models.list() -> ModelListResponse
+- client.inference.models.retrieve(model) -> Model
+- client.inference.models.list() -> ModelListResponse
 
 # Models
 
 Types:
 
 ```python
-from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
+from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
 ```
 
 Methods:
 
-- client.models.list(\*\*params) -> ModelListResponse
+- client.models.list(\*\*params) -> ModelListResponse
diff --git a/mypy.ini b/mypy.ini
index 748d8234..82b0c891 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -8,7 +8,7 @@ show_error_codes = True
 #
 # We also exclude our `tests` as mypy doesn't always infer
 # types correctly and Pyright will still catch any type errors.
-exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$
+exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$
 
 strict_equality = True
 implicit_reexport = True
diff --git a/pyproject.toml b/pyproject.toml
index 0dd5228b..7a5cd80b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -78,14 +78,14 @@ format = { chain = [
 
 "check:ruff" = "ruff check ."
 "fix:ruff" = "ruff check --fix ."
 
-"check:importable" = "python -c 'import gradientai'"
+"check:importable" = "python -c 'import do_gradientai'"
 
 typecheck = { chain = [
   "typecheck:pyright",
   "typecheck:mypy"
 ]}
 "typecheck:pyright" = "pyright"
-"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal"
+"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal"
 "typecheck:mypy" = "mypy ."
 
 [build-system]
@@ -98,7 +98,7 @@ include = [
 ]
 
 [tool.hatch.build.targets.wheel]
-packages = ["src/gradientai"]
+packages = ["src/do_gradientai"]
 
 [tool.hatch.build.targets.sdist]
 # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc)
@@ -201,7 +201,7 @@ length-sort = true
 length-sort-straight = true
 combine-as-imports = true
 extra-standard-library = ["typing_extensions"]
-known-first-party = ["gradientai", "tests"]
+known-first-party = ["do_gradientai", "tests"]
 
 [tool.ruff.lint.per-file-ignores]
 "bin/**.py" = ["T201", "T203"]
diff --git a/release-please-config.json b/release-please-config.json
index 2ff9a58c..a320c1a8 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -61,6 +61,6 @@
   ],
   "release-type": "python",
   "extra-files": [
-    "src/gradientai/_version.py"
+    "src/do_gradientai/_version.py"
   ]
 }
\ No newline at end of file
diff --git a/scripts/lint b/scripts/lint
index 37b38f6f..e46e909b 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -8,4 +8,4 @@ echo "==> Running lints"
 rye run lint
 
 echo "==> Making sure it imports"
-rye run python -c 'import gradientai'
+rye run python -c 'import do_gradientai'
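The `check:importable` task and the `scripts/lint` change above guard the same invariant. The equivalent check in plain Python, as a sketch:

```python
# Same smoke test the lint script runs via `python -c 'import do_gradientai'`.
import importlib

mod = importlib.import_module("do_gradientai")
print(mod.__version__)  # "0.1.0-alpha.4" per the _version.py diff below
```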
diff --git a/src/gradientai/__init__.py b/src/do_gradientai/__init__.py
similarity index 95%
rename from src/gradientai/__init__.py
rename to src/do_gradientai/__init__.py
index 3316fe47..41b943b2 100644
--- a/src/gradientai/__init__.py
+++ b/src/do_gradientai/__init__.py
@@ -89,12 +89,12 @@
 # Update the __module__ attribute for exported symbols so that
 # error messages point to this module instead of the module
 # it was originally defined in, e.g.
-# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError
+# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError
 __locals = locals()
 for __name in __all__:
     if not __name.startswith("__"):
         try:
-            __locals[__name].__module__ = "gradientai"
+            __locals[__name].__module__ = "do_gradientai"
         except (TypeError, AttributeError):
             # Some of our exported symbols are builtins which we can't set attributes for.
             pass
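The loop above is why tracebacks show the public module path. A standalone illustration of the same trick (a sketch, separate from the SDK's code):

```python
# Re-pointing __module__ changes how the class renders in reprs and tracebacks.
class NotFoundError(Exception):
    pass


NotFoundError.__module__ = "do_gradientai"
print(NotFoundError)  # <class 'do_gradientai.NotFoundError'>
```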
diff --git a/src/gradientai/_base_client.py b/src/do_gradientai/_base_client.py
similarity index 99%
rename from src/gradientai/_base_client.py
rename to src/do_gradientai/_base_client.py
index 6dce600b..30108c9d 100644
--- a/src/gradientai/_base_client.py
+++ b/src/do_gradientai/_base_client.py
@@ -389,7 +389,7 @@ def __init__(
 
         if max_retries is None:  # pyright: ignore[reportUnnecessaryComparison]
             raise TypeError(
-                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`"
+                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`"
             )
 
     def _enforce_trailing_slash(self, url: URL) -> URL:
diff --git a/src/gradientai/_client.py b/src/do_gradientai/_client.py
similarity index 100%
rename from src/gradientai/_client.py
rename to src/do_gradientai/_client.py
diff --git a/src/gradientai/_compat.py b/src/do_gradientai/_compat.py
similarity index 100%
rename from src/gradientai/_compat.py
rename to src/do_gradientai/_compat.py
diff --git a/src/gradientai/_constants.py b/src/do_gradientai/_constants.py
similarity index 100%
rename from src/gradientai/_constants.py
rename to src/do_gradientai/_constants.py
diff --git a/src/gradientai/_exceptions.py b/src/do_gradientai/_exceptions.py
similarity index 100%
rename from src/gradientai/_exceptions.py
rename to src/do_gradientai/_exceptions.py
diff --git a/src/gradientai/_files.py b/src/do_gradientai/_files.py
similarity index 100%
rename from src/gradientai/_files.py
rename to src/do_gradientai/_files.py
diff --git a/src/gradientai/_models.py b/src/do_gradientai/_models.py
similarity index 100%
rename from src/gradientai/_models.py
rename to src/do_gradientai/_models.py
diff --git a/src/gradientai/_qs.py b/src/do_gradientai/_qs.py
similarity index 100%
rename from src/gradientai/_qs.py
rename to src/do_gradientai/_qs.py
diff --git a/src/gradientai/_resource.py b/src/do_gradientai/_resource.py
similarity index 100%
rename from src/gradientai/_resource.py
rename to src/do_gradientai/_resource.py
diff --git a/src/gradientai/_response.py b/src/do_gradientai/_response.py
similarity index 99%
rename from src/gradientai/_response.py
rename to src/do_gradientai/_response.py
index 2037e4ca..8ca43971 100644
--- a/src/gradientai/_response.py
+++ b/src/do_gradientai/_response.py
@@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
             and issubclass(origin, pydantic.BaseModel)
         ):
             raise TypeError(
-                "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`"
+                "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`"
             )
 
         if (
@@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
         the `to` argument, e.g.
 
         ```py
-        from gradientai import BaseModel
+        from do_gradientai import BaseModel
 
 
         class MyModel(BaseModel):
@@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T:
         the `to` argument, e.g.
 
         ```py
-        from gradientai import BaseModel
+        from do_gradientai import BaseModel
 
 
         class MyModel(BaseModel):
@@ -558,7 +558,7 @@ async def stream_to_file(
 class MissingStreamClassError(TypeError):
     def __init__(self) -> None:
         super().__init__(
-            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference",
+            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `do_gradientai._streaming` for reference",
         )
diff --git a/src/gradientai/_streaming.py b/src/do_gradientai/_streaming.py
similarity index 100%
rename from src/gradientai/_streaming.py
rename to src/do_gradientai/_streaming.py
diff --git a/src/gradientai/_types.py b/src/do_gradientai/_types.py
similarity index 99%
rename from src/gradientai/_types.py
rename to src/do_gradientai/_types.py
index 1bac876d..c356c700 100644
--- a/src/gradientai/_types.py
+++ b/src/do_gradientai/_types.py
@@ -81,7 +81,7 @@
 # This unfortunately means that you will either have
 # to import this type and pass it explicitly:
 #
-# from gradientai import NoneType
+# from do_gradientai import NoneType
 # client.get('/foo', cast_to=NoneType)
 #
 # or build it yourself:
diff --git a/src/gradientai/_utils/__init__.py b/src/do_gradientai/_utils/__init__.py
similarity index 100%
rename from src/gradientai/_utils/__init__.py
rename to src/do_gradientai/_utils/__init__.py
diff --git a/src/gradientai/_utils/_logs.py b/src/do_gradientai/_utils/_logs.py
similarity index 75%
rename from src/gradientai/_utils/_logs.py
rename to src/do_gradientai/_utils/_logs.py
index 9047e5c8..ac45b1a5 100644
--- a/src/gradientai/_utils/_logs.py
+++ b/src/do_gradientai/_utils/_logs.py
@@ -1,12 +1,12 @@
 import os
 import logging
 
-logger: logging.Logger = logging.getLogger("gradientai")
+logger: logging.Logger = logging.getLogger("do_gradientai")
 httpx_logger: logging.Logger = logging.getLogger("httpx")
 
 
 def _basic_config() -> None:
-    # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+    # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
     logging.basicConfig(
         format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
         datefmt="%Y-%m-%d %H:%M:%S",
diff --git a/src/gradientai/_utils/_proxy.py b/src/do_gradientai/_utils/_proxy.py
similarity index 100%
rename from src/gradientai/_utils/_proxy.py
rename to src/do_gradientai/_utils/_proxy.py
diff --git a/src/gradientai/_utils/_reflection.py b/src/do_gradientai/_utils/_reflection.py
similarity index 100%
rename from src/gradientai/_utils/_reflection.py
rename to src/do_gradientai/_utils/_reflection.py
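The next hunk updates `_utils/_resources_proxy.py`, which lazily imports the resources package on first use. A generic sketch of that lazy-import pattern (module-level `__getattr__` per PEP 562; an illustration of the idea, not the SDK's exact `LazyProxy` machinery):

```python
# In a package __init__.py: defer importing a heavy submodule until first access.
import importlib
from typing import Any


def __getattr__(name: str) -> Any:
    if name == "resources":
        return importlib.import_module("do_gradientai.resources")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```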
diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/do_gradientai/_utils/_resources_proxy.py
similarity index 50%
rename from src/gradientai/_utils/_resources_proxy.py
rename to src/do_gradientai/_utils/_resources_proxy.py
index b3bc4931..03763c3b 100644
--- a/src/gradientai/_utils/_resources_proxy.py
+++ b/src/do_gradientai/_utils/_resources_proxy.py
@@ -7,17 +7,17 @@
 
 
 class ResourcesProxy(LazyProxy[Any]):
-    """A proxy for the `gradientai.resources` module.
+    """A proxy for the `do_gradientai.resources` module.
 
-    This is used so that we can lazily import `gradientai.resources` only when
-    needed *and* so that users can just import `gradientai` and reference `gradientai.resources`
+    This is used so that we can lazily import `do_gradientai.resources` only when
+    needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources`
     """
 
     @override
     def __load__(self) -> Any:
         import importlib
 
-        mod = importlib.import_module("gradientai.resources")
+        mod = importlib.import_module("do_gradientai.resources")
 
         return mod
diff --git a/src/gradientai/_utils/_streams.py b/src/do_gradientai/_utils/_streams.py
similarity index 100%
rename from src/gradientai/_utils/_streams.py
rename to src/do_gradientai/_utils/_streams.py
diff --git a/src/gradientai/_utils/_sync.py b/src/do_gradientai/_utils/_sync.py
similarity index 100%
rename from src/gradientai/_utils/_sync.py
rename to src/do_gradientai/_utils/_sync.py
diff --git a/src/gradientai/_utils/_transform.py b/src/do_gradientai/_utils/_transform.py
similarity index 100%
rename from src/gradientai/_utils/_transform.py
rename to src/do_gradientai/_utils/_transform.py
diff --git a/src/gradientai/_utils/_typing.py b/src/do_gradientai/_utils/_typing.py
similarity index 100%
rename from src/gradientai/_utils/_typing.py
rename to src/do_gradientai/_utils/_typing.py
diff --git a/src/gradientai/_utils/_utils.py b/src/do_gradientai/_utils/_utils.py
similarity index 100%
rename from src/gradientai/_utils/_utils.py
rename to src/do_gradientai/_utils/_utils.py
diff --git a/src/gradientai/_version.py b/src/do_gradientai/_version.py
similarity index 83%
rename from src/gradientai/_version.py
rename to src/do_gradientai/_version.py
index 4d3df522..83bf8865 100644
--- a/src/gradientai/_version.py
+++ b/src/do_gradientai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-__title__ = "gradientai"
+__title__ = "do_gradientai"
 
 __version__ = "0.1.0-alpha.4"  # x-release-please-version
diff --git a/src/do_gradientai/lib/.keep b/src/do_gradientai/lib/.keep
new file mode 100644
index 00000000..5e2c99fd
--- /dev/null
+++ b/src/do_gradientai/lib/.keep
@@ -0,0 +1,4 @@
+File generated from our OpenAPI spec by Stainless.
+
+This directory can be used to store custom files to expand the SDK.
+It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
\ No newline at end of file diff --git a/src/gradientai/py.typed b/src/do_gradientai/py.typed similarity index 100% rename from src/gradientai/py.typed rename to src/do_gradientai/py.typed diff --git a/src/gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py similarity index 100% rename from src/gradientai/resources/__init__.py rename to src/do_gradientai/resources/__init__.py diff --git a/src/gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py similarity index 100% rename from src/gradientai/resources/agents/__init__.py rename to src/do_gradientai/resources/agents/__init__.py diff --git a/src/gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py similarity index 100% rename from src/gradientai/resources/agents/agents.py rename to src/do_gradientai/resources/agents/agents.py diff --git a/src/gradientai/resources/agents/api_keys.py b/src/do_gradientai/resources/agents/api_keys.py similarity index 100% rename from src/gradientai/resources/agents/api_keys.py rename to src/do_gradientai/resources/agents/api_keys.py diff --git a/src/gradientai/resources/agents/child_agents.py b/src/do_gradientai/resources/agents/child_agents.py similarity index 100% rename from src/gradientai/resources/agents/child_agents.py rename to src/do_gradientai/resources/agents/child_agents.py diff --git a/src/gradientai/resources/agents/functions.py b/src/do_gradientai/resources/agents/functions.py similarity index 100% rename from src/gradientai/resources/agents/functions.py rename to src/do_gradientai/resources/agents/functions.py diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/do_gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/agents/knowledge_bases.py rename to src/do_gradientai/resources/agents/knowledge_bases.py diff --git a/src/gradientai/resources/agents/versions.py b/src/do_gradientai/resources/agents/versions.py similarity index 100% rename from src/gradientai/resources/agents/versions.py rename to src/do_gradientai/resources/agents/versions.py diff --git a/src/gradientai/resources/chat/__init__.py b/src/do_gradientai/resources/chat/__init__.py similarity index 100% rename from src/gradientai/resources/chat/__init__.py rename to src/do_gradientai/resources/chat/__init__.py diff --git a/src/gradientai/resources/chat/chat.py b/src/do_gradientai/resources/chat/chat.py similarity index 100% rename from src/gradientai/resources/chat/chat.py rename to src/do_gradientai/resources/chat/chat.py diff --git a/src/gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py similarity index 100% rename from src/gradientai/resources/chat/completions.py rename to src/do_gradientai/resources/chat/completions.py diff --git a/src/gradientai/resources/indexing_jobs.py b/src/do_gradientai/resources/indexing_jobs.py similarity index 100% rename from src/gradientai/resources/indexing_jobs.py rename to src/do_gradientai/resources/indexing_jobs.py diff --git a/src/gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py similarity index 100% rename from src/gradientai/resources/inference/__init__.py rename to src/do_gradientai/resources/inference/__init__.py diff --git a/src/gradientai/resources/inference/api_keys.py b/src/do_gradientai/resources/inference/api_keys.py similarity index 100% rename from src/gradientai/resources/inference/api_keys.py rename to 
src/do_gradientai/resources/inference/api_keys.py diff --git a/src/gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py similarity index 100% rename from src/gradientai/resources/inference/inference.py rename to src/do_gradientai/resources/inference/inference.py diff --git a/src/gradientai/resources/inference/models.py b/src/do_gradientai/resources/inference/models.py similarity index 100% rename from src/gradientai/resources/inference/models.py rename to src/do_gradientai/resources/inference/models.py diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/__init__.py rename to src/do_gradientai/resources/knowledge_bases/__init__.py diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/do_gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/data_sources.py rename to src/do_gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/do_gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/gradientai/resources/models.py b/src/do_gradientai/resources/models.py similarity index 100% rename from src/gradientai/resources/models.py rename to src/do_gradientai/resources/models.py diff --git a/src/gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py similarity index 100% rename from src/gradientai/resources/providers/__init__.py rename to src/do_gradientai/resources/providers/__init__.py diff --git a/src/gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py similarity index 100% rename from src/gradientai/resources/providers/anthropic/__init__.py rename to src/do_gradientai/resources/providers/anthropic/__init__.py diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py similarity index 100% rename from src/gradientai/resources/providers/anthropic/anthropic.py rename to src/do_gradientai/resources/providers/anthropic/anthropic.py diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py similarity index 100% rename from src/gradientai/resources/providers/anthropic/keys.py rename to src/do_gradientai/resources/providers/anthropic/keys.py diff --git a/src/gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py similarity index 100% rename from src/gradientai/resources/providers/openai/__init__.py rename to src/do_gradientai/resources/providers/openai/__init__.py diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py similarity index 100% rename from src/gradientai/resources/providers/openai/keys.py rename to src/do_gradientai/resources/providers/openai/keys.py diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py similarity index 100% rename from src/gradientai/resources/providers/openai/openai.py rename to 
src/do_gradientai/resources/providers/openai/openai.py diff --git a/src/gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py similarity index 100% rename from src/gradientai/resources/providers/providers.py rename to src/do_gradientai/resources/providers/providers.py diff --git a/src/gradientai/resources/regions/__init__.py b/src/do_gradientai/resources/regions/__init__.py similarity index 100% rename from src/gradientai/resources/regions/__init__.py rename to src/do_gradientai/resources/regions/__init__.py diff --git a/src/gradientai/resources/regions/evaluation_datasets.py b/src/do_gradientai/resources/regions/evaluation_datasets.py similarity index 100% rename from src/gradientai/resources/regions/evaluation_datasets.py rename to src/do_gradientai/resources/regions/evaluation_datasets.py diff --git a/src/gradientai/resources/regions/evaluation_runs/__init__.py b/src/do_gradientai/resources/regions/evaluation_runs/__init__.py similarity index 100% rename from src/gradientai/resources/regions/evaluation_runs/__init__.py rename to src/do_gradientai/resources/regions/evaluation_runs/__init__.py diff --git a/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py b/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py similarity index 100% rename from src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py rename to src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py diff --git a/src/gradientai/resources/regions/evaluation_runs/results.py b/src/do_gradientai/resources/regions/evaluation_runs/results.py similarity index 100% rename from src/gradientai/resources/regions/evaluation_runs/results.py rename to src/do_gradientai/resources/regions/evaluation_runs/results.py diff --git a/src/gradientai/resources/regions/evaluation_test_cases.py b/src/do_gradientai/resources/regions/evaluation_test_cases.py similarity index 100% rename from src/gradientai/resources/regions/evaluation_test_cases.py rename to src/do_gradientai/resources/regions/evaluation_test_cases.py diff --git a/src/gradientai/resources/regions/regions.py b/src/do_gradientai/resources/regions/regions.py similarity index 100% rename from src/gradientai/resources/regions/regions.py rename to src/do_gradientai/resources/regions/regions.py diff --git a/src/gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py similarity index 100% rename from src/gradientai/types/__init__.py rename to src/do_gradientai/types/__init__.py diff --git a/src/gradientai/types/agent_create_params.py b/src/do_gradientai/types/agent_create_params.py similarity index 100% rename from src/gradientai/types/agent_create_params.py rename to src/do_gradientai/types/agent_create_params.py diff --git a/src/gradientai/types/agent_create_response.py b/src/do_gradientai/types/agent_create_response.py similarity index 100% rename from src/gradientai/types/agent_create_response.py rename to src/do_gradientai/types/agent_create_response.py diff --git a/src/gradientai/types/agent_delete_response.py b/src/do_gradientai/types/agent_delete_response.py similarity index 100% rename from src/gradientai/types/agent_delete_response.py rename to src/do_gradientai/types/agent_delete_response.py diff --git a/src/gradientai/types/agent_list_params.py b/src/do_gradientai/types/agent_list_params.py similarity index 100% rename from src/gradientai/types/agent_list_params.py rename to src/do_gradientai/types/agent_list_params.py diff --git 
a/src/gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py similarity index 100% rename from src/gradientai/types/agent_list_response.py rename to src/do_gradientai/types/agent_list_response.py diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/do_gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/gradientai/types/agent_retrieve_response.py rename to src/do_gradientai/types/agent_retrieve_response.py diff --git a/src/gradientai/types/agent_update_params.py b/src/do_gradientai/types/agent_update_params.py similarity index 100% rename from src/gradientai/types/agent_update_params.py rename to src/do_gradientai/types/agent_update_params.py diff --git a/src/gradientai/types/agent_update_response.py b/src/do_gradientai/types/agent_update_response.py similarity index 100% rename from src/gradientai/types/agent_update_response.py rename to src/do_gradientai/types/agent_update_response.py diff --git a/src/gradientai/types/agent_update_status_params.py b/src/do_gradientai/types/agent_update_status_params.py similarity index 100% rename from src/gradientai/types/agent_update_status_params.py rename to src/do_gradientai/types/agent_update_status_params.py diff --git a/src/gradientai/types/agent_update_status_response.py b/src/do_gradientai/types/agent_update_status_response.py similarity index 100% rename from src/gradientai/types/agent_update_status_response.py rename to src/do_gradientai/types/agent_update_status_response.py diff --git a/src/gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to src/do_gradientai/types/agents/__init__.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/do_gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/do_gradientai/types/agents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/do_gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to src/do_gradientai/types/agents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/do_gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/do_gradientai/types/agents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/do_gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/do_gradientai/types/agents/api_key_list_params.py diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/do_gradientai/types/agents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/do_gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/do_gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py 
b/src/do_gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_params.py rename to src/do_gradientai/types/agents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/do_gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/do_gradientai/types/agents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/do_gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to src/do_gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_links.py b/src/do_gradientai/types/agents/api_links.py similarity index 100% rename from src/gradientai/types/agents/api_links.py rename to src/do_gradientai/types/agents/api_links.py diff --git a/src/gradientai/types/agents/api_meta.py b/src/do_gradientai/types/agents/api_meta.py similarity index 100% rename from src/gradientai/types/agents/api_meta.py rename to src/do_gradientai/types/agents/api_meta.py diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/do_gradientai/types/agents/child_agent_add_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_add_params.py rename to src/do_gradientai/types/agents/child_agent_add_params.py diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/do_gradientai/types/agents/child_agent_add_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_add_response.py rename to src/do_gradientai/types/agents/child_agent_add_response.py diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/do_gradientai/types/agents/child_agent_delete_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_delete_response.py rename to src/do_gradientai/types/agents/child_agent_delete_response.py diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/do_gradientai/types/agents/child_agent_update_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_params.py rename to src/do_gradientai/types/agents/child_agent_update_params.py diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/do_gradientai/types/agents/child_agent_update_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_response.py rename to src/do_gradientai/types/agents/child_agent_update_response.py diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/do_gradientai/types/agents/child_agent_view_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_view_response.py rename to src/do_gradientai/types/agents/child_agent_view_response.py diff --git a/src/gradientai/types/agents/function_create_params.py b/src/do_gradientai/types/agents/function_create_params.py similarity index 100% rename from src/gradientai/types/agents/function_create_params.py rename to src/do_gradientai/types/agents/function_create_params.py diff --git a/src/gradientai/types/agents/function_create_response.py b/src/do_gradientai/types/agents/function_create_response.py similarity index 100% rename from src/gradientai/types/agents/function_create_response.py rename to 
src/do_gradientai/types/agents/function_create_response.py diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/do_gradientai/types/agents/function_delete_response.py similarity index 100% rename from src/gradientai/types/agents/function_delete_response.py rename to src/do_gradientai/types/agents/function_delete_response.py diff --git a/src/gradientai/types/agents/function_update_params.py b/src/do_gradientai/types/agents/function_update_params.py similarity index 100% rename from src/gradientai/types/agents/function_update_params.py rename to src/do_gradientai/types/agents/function_update_params.py diff --git a/src/gradientai/types/agents/function_update_response.py b/src/do_gradientai/types/agents/function_update_response.py similarity index 100% rename from src/gradientai/types/agents/function_update_response.py rename to src/do_gradientai/types/agents/function_update_response.py diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/do_gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/agents/knowledge_base_detach_response.py rename to src/do_gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/agents/version_list_params.py b/src/do_gradientai/types/agents/version_list_params.py similarity index 100% rename from src/gradientai/types/agents/version_list_params.py rename to src/do_gradientai/types/agents/version_list_params.py diff --git a/src/gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py similarity index 100% rename from src/gradientai/types/agents/version_list_response.py rename to src/do_gradientai/types/agents/version_list_response.py diff --git a/src/gradientai/types/agents/version_update_params.py b/src/do_gradientai/types/agents/version_update_params.py similarity index 100% rename from src/gradientai/types/agents/version_update_params.py rename to src/do_gradientai/types/agents/version_update_params.py diff --git a/src/gradientai/types/agents/version_update_response.py b/src/do_gradientai/types/agents/version_update_response.py similarity index 100% rename from src/gradientai/types/agents/version_update_response.py rename to src/do_gradientai/types/agents/version_update_response.py diff --git a/src/gradientai/types/api_agent.py b/src/do_gradientai/types/api_agent.py similarity index 100% rename from src/gradientai/types/api_agent.py rename to src/do_gradientai/types/api_agent.py diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/do_gradientai/types/api_agent_api_key_info.py similarity index 100% rename from src/gradientai/types/api_agent_api_key_info.py rename to src/do_gradientai/types/api_agent_api_key_info.py diff --git a/src/gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py similarity index 100% rename from src/gradientai/types/api_agent_model.py rename to src/do_gradientai/types/api_agent_model.py diff --git a/src/gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py similarity index 100% rename from src/gradientai/types/api_agreement.py rename to src/do_gradientai/types/api_agreement.py diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/do_gradientai/types/api_anthropic_api_key_info.py similarity index 100% rename from src/gradientai/types/api_anthropic_api_key_info.py rename to src/do_gradientai/types/api_anthropic_api_key_info.py diff --git 
a/src/gradientai/types/api_deployment_visibility.py b/src/do_gradientai/types/api_deployment_visibility.py similarity index 100% rename from src/gradientai/types/api_deployment_visibility.py rename to src/do_gradientai/types/api_deployment_visibility.py diff --git a/src/gradientai/types/api_evaluation_metric.py b/src/do_gradientai/types/api_evaluation_metric.py similarity index 100% rename from src/gradientai/types/api_evaluation_metric.py rename to src/do_gradientai/types/api_evaluation_metric.py diff --git a/src/gradientai/types/api_indexing_job.py b/src/do_gradientai/types/api_indexing_job.py similarity index 100% rename from src/gradientai/types/api_indexing_job.py rename to src/do_gradientai/types/api_indexing_job.py diff --git a/src/gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py similarity index 100% rename from src/gradientai/types/api_knowledge_base.py rename to src/do_gradientai/types/api_knowledge_base.py diff --git a/src/gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py similarity index 100% rename from src/gradientai/types/api_model.py rename to src/do_gradientai/types/api_model.py diff --git a/src/gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py similarity index 100% rename from src/gradientai/types/api_model_version.py rename to src/do_gradientai/types/api_model_version.py diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/do_gradientai/types/api_openai_api_key_info.py similarity index 100% rename from src/gradientai/types/api_openai_api_key_info.py rename to src/do_gradientai/types/api_openai_api_key_info.py diff --git a/src/gradientai/types/api_retrieval_method.py b/src/do_gradientai/types/api_retrieval_method.py similarity index 100% rename from src/gradientai/types/api_retrieval_method.py rename to src/do_gradientai/types/api_retrieval_method.py diff --git a/src/gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py similarity index 100% rename from src/gradientai/types/api_workspace.py rename to src/do_gradientai/types/api_workspace.py diff --git a/src/gradientai/types/chat/__init__.py b/src/do_gradientai/types/chat/__init__.py similarity index 100% rename from src/gradientai/types/chat/__init__.py rename to src/do_gradientai/types/chat/__init__.py diff --git a/src/gradientai/types/chat/chat_completion_token_logprob.py b/src/do_gradientai/types/chat/chat_completion_token_logprob.py similarity index 100% rename from src/gradientai/types/chat/chat_completion_token_logprob.py rename to src/do_gradientai/types/chat/chat_completion_token_logprob.py diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/do_gradientai/types/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/chat/completion_create_params.py rename to src/do_gradientai/types/chat/completion_create_params.py diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/do_gradientai/types/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/chat/completion_create_response.py rename to src/do_gradientai/types/chat/completion_create_response.py diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/do_gradientai/types/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/indexing_job_create_params.py rename to src/do_gradientai/types/indexing_job_create_params.py diff --git a/src/gradientai/types/indexing_job_create_response.py 
b/src/do_gradientai/types/indexing_job_create_response.py similarity index 100% rename from src/gradientai/types/indexing_job_create_response.py rename to src/do_gradientai/types/indexing_job_create_response.py diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/do_gradientai/types/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/indexing_job_list_params.py rename to src/do_gradientai/types/indexing_job_list_params.py diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/do_gradientai/types/indexing_job_list_response.py similarity index 100% rename from src/gradientai/types/indexing_job_list_response.py rename to src/do_gradientai/types/indexing_job_list_response.py diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from src/gradientai/types/indexing_job_retrieve_data_sources_response.py rename to src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py diff --git a/src/gradientai/types/indexing_job_retrieve_response.py b/src/do_gradientai/types/indexing_job_retrieve_response.py similarity index 100% rename from src/gradientai/types/indexing_job_retrieve_response.py rename to src/do_gradientai/types/indexing_job_retrieve_response.py diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/do_gradientai/types/indexing_job_update_cancel_params.py similarity index 100% rename from src/gradientai/types/indexing_job_update_cancel_params.py rename to src/do_gradientai/types/indexing_job_update_cancel_params.py diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/do_gradientai/types/indexing_job_update_cancel_response.py similarity index 100% rename from src/gradientai/types/indexing_job_update_cancel_response.py rename to src/do_gradientai/types/indexing_job_update_cancel_response.py diff --git a/src/gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py similarity index 100% rename from src/gradientai/types/inference/__init__.py rename to src/do_gradientai/types/inference/__init__.py diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/do_gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_params.py rename to src/do_gradientai/types/inference/api_key_create_params.py diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/do_gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_response.py rename to src/do_gradientai/types/inference/api_key_create_response.py diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/do_gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_delete_response.py rename to src/do_gradientai/types/inference/api_key_delete_response.py diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/do_gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_list_params.py rename to src/do_gradientai/types/inference/api_key_list_params.py diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py similarity index 100% rename from 
src/gradientai/types/inference/api_key_list_response.py rename to src/do_gradientai/types/inference/api_key_list_response.py diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/do_gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_params.py rename to src/do_gradientai/types/inference/api_key_update_params.py diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/do_gradientai/types/inference/api_key_update_regenerate_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_regenerate_response.py rename to src/do_gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/do_gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_response.py rename to src/do_gradientai/types/inference/api_key_update_response.py diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/do_gradientai/types/inference/api_model_api_key_info.py similarity index 100% rename from src/gradientai/types/inference/api_model_api_key_info.py rename to src/do_gradientai/types/inference/api_model_api_key_info.py diff --git a/src/gradientai/types/inference/model.py b/src/do_gradientai/types/inference/model.py similarity index 100% rename from src/gradientai/types/inference/model.py rename to src/do_gradientai/types/inference/model.py diff --git a/src/gradientai/types/inference/model_list_response.py b/src/do_gradientai/types/inference/model_list_response.py similarity index 100% rename from src/gradientai/types/inference/model_list_response.py rename to src/do_gradientai/types/inference/model_list_response.py diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/do_gradientai/types/knowledge_base_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_params.py rename to src/do_gradientai/types/knowledge_base_create_params.py diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/do_gradientai/types/knowledge_base_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_response.py rename to src/do_gradientai/types/knowledge_base_create_response.py diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/do_gradientai/types/knowledge_base_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_delete_response.py rename to src/do_gradientai/types/knowledge_base_delete_response.py diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/do_gradientai/types/knowledge_base_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_params.py rename to src/do_gradientai/types/knowledge_base_list_params.py diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_response.py rename to src/do_gradientai/types/knowledge_base_list_response.py diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/do_gradientai/types/knowledge_base_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_retrieve_response.py rename to src/do_gradientai/types/knowledge_base_retrieve_response.py diff --git 
a/src/gradientai/types/knowledge_base_update_params.py b/src/do_gradientai/types/knowledge_base_update_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_update_params.py rename to src/do_gradientai/types/knowledge_base_update_params.py diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/do_gradientai/types/knowledge_base_update_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_update_response.py rename to src/do_gradientai/types/knowledge_base_update_response.py diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/types/knowledge_bases/__init__.py rename to src/do_gradientai/types/knowledge_bases/__init__.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/aws_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/aws_data_source_param.py diff --git 
a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/do_gradientai/types/knowledge_bases/data_source_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/do_gradientai/types/knowledge_bases/data_source_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_delete_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/do_gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py similarity index 100% rename from src/gradientai/types/model_list_params.py rename to src/do_gradientai/types/model_list_params.py diff --git a/src/gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py similarity index 100% rename from src/gradientai/types/model_list_response.py rename to src/do_gradientai/types/model_list_response.py diff --git a/src/gradientai/types/providers/__init__.py b/src/do_gradientai/types/providers/__init__.py similarity index 100% rename from src/gradientai/types/providers/__init__.py rename to src/do_gradientai/types/providers/__init__.py diff --git a/src/gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py similarity index 100% rename from src/gradientai/types/providers/anthropic/__init__.py rename to src/do_gradientai/types/providers/anthropic/__init__.py diff --git a/src/gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_create_params.py rename to src/do_gradientai/types/providers/anthropic/key_create_params.py diff --git a/src/gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_create_response.py rename to src/do_gradientai/types/providers/anthropic/key_create_response.py diff --git a/src/gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_delete_response.py rename to 
src/do_gradientai/types/providers/anthropic/key_delete_response.py diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_list_agents_params.py rename to src/do_gradientai/types/providers/anthropic/key_list_agents_params.py diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_list_agents_response.py rename to src/do_gradientai/types/providers/anthropic/key_list_agents_response.py diff --git a/src/gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_list_params.py rename to src/do_gradientai/types/providers/anthropic/key_list_params.py diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_list_response.py rename to src/do_gradientai/types/providers/anthropic/key_list_response.py diff --git a/src/gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_retrieve_response.py rename to src/do_gradientai/types/providers/anthropic/key_retrieve_response.py diff --git a/src/gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_update_params.py rename to src/do_gradientai/types/providers/anthropic/key_update_params.py diff --git a/src/gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py similarity index 100% rename from src/gradientai/types/providers/anthropic/key_update_response.py rename to src/do_gradientai/types/providers/anthropic/key_update_response.py diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py similarity index 100% rename from src/gradientai/types/providers/openai/__init__.py rename to src/do_gradientai/types/providers/openai/__init__.py diff --git a/src/gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py similarity index 100% rename from src/gradientai/types/providers/openai/key_create_params.py rename to src/do_gradientai/types/providers/openai/key_create_params.py diff --git a/src/gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_create_response.py rename to src/do_gradientai/types/providers/openai/key_create_response.py diff --git a/src/gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_delete_response.py rename to src/do_gradientai/types/providers/openai/key_delete_response.py diff --git 
a/src/gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py similarity index 100% rename from src/gradientai/types/providers/openai/key_list_params.py rename to src/do_gradientai/types/providers/openai/key_list_params.py diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_list_response.py rename to src/do_gradientai/types/providers/openai/key_list_response.py diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py similarity index 100% rename from src/gradientai/types/providers/openai/key_retrieve_agents_params.py rename to src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_retrieve_agents_response.py rename to src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py diff --git a/src/gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_retrieve_response.py rename to src/do_gradientai/types/providers/openai/key_retrieve_response.py diff --git a/src/gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py similarity index 100% rename from src/gradientai/types/providers/openai/key_update_params.py rename to src/do_gradientai/types/providers/openai/key_update_params.py diff --git a/src/gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py similarity index 100% rename from src/gradientai/types/providers/openai/key_update_response.py rename to src/do_gradientai/types/providers/openai/key_update_response.py diff --git a/src/gradientai/types/region_list_evaluation_metrics_response.py b/src/do_gradientai/types/region_list_evaluation_metrics_response.py similarity index 100% rename from src/gradientai/types/region_list_evaluation_metrics_response.py rename to src/do_gradientai/types/region_list_evaluation_metrics_response.py diff --git a/src/gradientai/types/region_list_params.py b/src/do_gradientai/types/region_list_params.py similarity index 100% rename from src/gradientai/types/region_list_params.py rename to src/do_gradientai/types/region_list_params.py diff --git a/src/gradientai/types/region_list_response.py b/src/do_gradientai/types/region_list_response.py similarity index 100% rename from src/gradientai/types/region_list_response.py rename to src/do_gradientai/types/region_list_response.py diff --git a/src/gradientai/types/regions/__init__.py b/src/do_gradientai/types/regions/__init__.py similarity index 100% rename from src/gradientai/types/regions/__init__.py rename to src/do_gradientai/types/regions/__init__.py diff --git a/src/gradientai/types/regions/api_evaluation_test_case.py b/src/do_gradientai/types/regions/api_evaluation_test_case.py similarity index 100% rename from src/gradientai/types/regions/api_evaluation_test_case.py rename to src/do_gradientai/types/regions/api_evaluation_test_case.py diff --git 
a/src/gradientai/types/regions/api_star_metric.py b/src/do_gradientai/types/regions/api_star_metric.py similarity index 100% rename from src/gradientai/types/regions/api_star_metric.py rename to src/do_gradientai/types/regions/api_star_metric.py diff --git a/src/gradientai/types/regions/api_star_metric_param.py b/src/do_gradientai/types/regions/api_star_metric_param.py similarity index 100% rename from src/gradientai/types/regions/api_star_metric_param.py rename to src/do_gradientai/types/regions/api_star_metric_param.py diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/gradientai/types/regions/evaluation_dataset_create_params.py b/src/do_gradientai/types/regions/evaluation_dataset_create_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_dataset_create_params.py rename to src/do_gradientai/types/regions/evaluation_dataset_create_params.py diff --git a/src/gradientai/types/regions/evaluation_dataset_create_response.py b/src/do_gradientai/types/regions/evaluation_dataset_create_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_dataset_create_response.py rename to src/do_gradientai/types/regions/evaluation_dataset_create_response.py diff --git a/src/gradientai/types/regions/evaluation_run_create_params.py b/src/do_gradientai/types/regions/evaluation_run_create_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_run_create_params.py rename to src/do_gradientai/types/regions/evaluation_run_create_params.py diff --git a/src/gradientai/types/regions/evaluation_run_create_response.py b/src/do_gradientai/types/regions/evaluation_run_create_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_run_create_response.py rename to src/do_gradientai/types/regions/evaluation_run_create_response.py diff --git a/src/gradientai/types/regions/evaluation_run_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_run_retrieve_response.py rename to src/do_gradientai/types/regions/evaluation_run_retrieve_response.py diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/do_gradientai/types/regions/evaluation_runs/__init__.py similarity index 100% rename from src/gradientai/types/regions/evaluation_runs/__init__.py rename to src/do_gradientai/types/regions/evaluation_runs/__init__.py diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py similarity index 100% rename from 
src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py rename to src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py b/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py similarity index 100% rename from src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py rename to src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py diff --git a/src/gradientai/types/regions/evaluation_runs/api_prompt.py b/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py similarity index 100% rename from src/gradientai/types/regions/evaluation_runs/api_prompt.py rename to src/do_gradientai/types/regions/evaluation_runs/api_prompt.py diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py rename to src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py rename to src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py diff --git a/src/gradientai/types/regions/evaluation_test_case_create_params.py b/src/do_gradientai/types/regions/evaluation_test_case_create_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_create_params.py rename to src/do_gradientai/types/regions/evaluation_test_case_create_params.py diff --git a/src/gradientai/types/regions/evaluation_test_case_create_response.py b/src/do_gradientai/types/regions/evaluation_test_case_create_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_create_response.py rename to src/do_gradientai/types/regions/evaluation_test_case_create_response.py diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py rename to src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py rename to src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py diff --git a/src/gradientai/types/regions/evaluation_test_case_list_response.py b/src/do_gradientai/types/regions/evaluation_test_case_list_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_list_response.py rename to src/do_gradientai/types/regions/evaluation_test_case_list_response.py diff --git a/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py similarity index 100% rename from 
src/gradientai/types/regions/evaluation_test_case_retrieve_response.py rename to src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py diff --git a/src/gradientai/types/regions/evaluation_test_case_update_params.py b/src/do_gradientai/types/regions/evaluation_test_case_update_params.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_update_params.py rename to src/do_gradientai/types/regions/evaluation_test_case_update_params.py diff --git a/src/gradientai/types/regions/evaluation_test_case_update_response.py b/src/do_gradientai/types/regions/evaluation_test_case_update_response.py similarity index 100% rename from src/gradientai/types/regions/evaluation_test_case_update_response.py rename to src/do_gradientai/types/regions/evaluation_test_case_update_response.py diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index beb9666a..65351922 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py index daa7b10e..c5108463 100644 --- a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/agents/test_child_agents.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( ChildAgentAddResponse, ChildAgentViewResponse, ChildAgentDeleteResponse, diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 5a3693cb..2c5ceaf7 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index e62c05ff..0a007840 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 79f73672..314cd2e2 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, 
AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index b4c09579..62f24534 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.chat import CompletionCreateResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index 90bf95b9..c48a5420 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py index 569345ed..e930d83f 100644 --- a/tests/api_resources/inference/test_models.py +++ b/tests/api_resources/inference/test_models.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import Model, ModelListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.inference import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 9c466e2f..15665a84 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py index 86ec19f4..7aa595f7 100644 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.providers.anthropic import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.providers.anthropic import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py index ce5cb4f5..714dc4bd 100644 --- a/tests/api_resources/providers/openai/test_keys.py +++ 
b/tests/api_resources/providers/openai/test_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.providers.openai import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.providers.openai import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py index 29deb8b2..e4b906bd 100644 --- a/tests/api_resources/regions/evaluation_runs/test_results.py +++ b/tests/api_resources/regions/evaluation_runs/test_results.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/regions/test_evaluation_datasets.py index 3e3da0fe..6e7a5e52 100644 --- a/tests/api_resources/regions/test_evaluation_datasets.py +++ b/tests/api_resources/regions/test_evaluation_datasets.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.regions import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.regions import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/regions/test_evaluation_runs.py index b2d3c634..09bf8525 100644 --- a/tests/api_resources/regions/test_evaluation_runs.py +++ b/tests/api_resources/regions/test_evaluation_runs.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.regions import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.regions import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, ) diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/regions/test_evaluation_test_cases.py index a01ace90..7cc18835 100644 --- a/tests/api_resources/regions/test_evaluation_test_cases.py +++ b/tests/api_resources/regions/test_evaluation_test_cases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.regions import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.regions import ( EvaluationTestCaseListResponse, EvaluationTestCaseCreateResponse, EvaluationTestCaseUpdateResponse, diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2cc0e080..74c8cdab 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, 
diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py index 6a50d9b5..41ba0f8c 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 508820ce..2132cd50 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 5e119f71..f7e21015 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 9cb24b0a..bf51ef96 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/conftest.py b/tests/conftest.py index 23079a7e..daa5b955 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,15 +10,15 @@ import pytest from pytest_asyncio import is_async_test -from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient -from gradientai._utils import is_dict +from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from do_gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("gradientai").setLevel(logging.DEBUG) +logging.getLogger("do_gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests diff --git a/tests/test_client.py b/tests/test_client.py index f19a5edb..4d20a597 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,11 +21,11 @@ from respx import MockRouter from pydantic import ValidationError -from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError -from gradientai._types import Omit -from gradientai._models import BaseModel, FinalRequestOptions -from gradientai._exceptions import APIStatusError, 
APITimeoutError, GradientAIError, APIResponseValidationError -from gradientai._base_client import ( +from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from do_gradientai._types import Omit +from do_gradientai._models import BaseModel, FinalRequestOptions +from do_gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError +from do_gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -231,10 +231,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -718,7 +718,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -728,7 +728,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -738,7 +738,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -769,7 +769,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -794,7 
+794,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1043,10 +1043,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1536,7 +1536,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1548,7 +1548,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1560,7 +1560,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1592,7 +1592,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1618,7 +1618,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) 
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -1654,8 +1654,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from gradientai._utils import asyncify - from gradientai._base_client import get_platform + from do_gradientai._utils import asyncify + from do_gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 9d1579a8..5a98ce1b 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from gradientai._utils import deepcopy_minimal +from do_gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index 2905d59c..341e65ae 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from gradientai._types import FileTypes -from gradientai._utils import extract_files +from do_gradientai._types import FileTypes +from do_gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index 4a723313..ff7914bb 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from gradientai._files import to_httpx_files, async_to_httpx_files +from do_gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 28aff1f3..575dc3af 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from gradientai._utils import PropertyInfo -from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from gradientai._models import BaseModel, construct_type +from do_gradientai._utils import PropertyInfo +from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from do_gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index 9080377b..c9213571 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from gradientai._qs import Querystring, stringify +from do_gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index c4e6b9d8..434e9491 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from gradientai._utils import required_args +from do_gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 1a8f241e..001ce776 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from gradientai import BaseModel, GradientAI, AsyncGradientAI -from gradientai._response import ( +from do_gradientai import BaseModel, GradientAI, AsyncGradientAI +from do_gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ 
AsyncBinaryAPIResponse, extract_response_type, ) -from gradientai._streaming import Stream -from gradientai._base_client import FinalRequestOptions +from do_gradientai._streaming import Stream +from do_gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index cdb41a77..c1ce8e85 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from gradientai import GradientAI, AsyncGradientAI -from gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 825fe048..30c06d6a 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from gradientai._types import NOT_GIVEN, Base64FileInput -from gradientai._utils import ( +from do_gradientai._types import NOT_GIVEN, Base64FileInput +from do_gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from gradientai._compat import PYDANTIC_V2 -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2 +from do_gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 3856b2c9..9ce2e0d3 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from gradientai._utils import LazyProxy +from do_gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 66ad064f..c9129fdc 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from gradientai._utils import extract_type_var_from_base +from do_gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index b539ed2c..9def7c60 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions 
import Literal, get_args, get_origin, assert_type
-from gradientai._types import Omit, NoneType
-from gradientai._utils import (
+from do_gradientai._types import Omit, NoneType
+from do_gradientai._utils import (
     is_dict,
     is_list,
     is_list_type,
@@ -18,8 +18,8 @@
     is_annotated_type,
     is_type_alias_type,
 )
-from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields
-from gradientai._models import BaseModel
+from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields
+from do_gradientai._models import BaseModel
 
 BaseModelT = TypeVar("BaseModelT", bound=BaseModel)

From 40c63e2eaa5f28ccf29613a17d2a3a8fb481646d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:34:55 +0000
Subject: [PATCH 063/200] chore(internal): codegen related update

---
 tests/test_client.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/test_client.py b/tests/test_client.py
index 4d20a597..920275ae 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -191,6 +191,7 @@ def test_copy_signature(self) -> None:
         copy_param = copy_signature.parameters.get(name)
         assert copy_param is not None, f"copy() signature is missing the {name} param"
 
+    @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
     def test_copy_build_request(self) -> None:
         options = FinalRequestOptions(method="get", url="/foo")
 
@@ -1003,6 +1004,7 @@ def test_copy_signature(self) -> None:
         copy_param = copy_signature.parameters.get(name)
         assert copy_param is not None, f"copy() signature is missing the {name} param"
 
+    @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
     def test_copy_build_request(self) -> None:
         options = FinalRequestOptions(method="get", url="/foo")

From 2a0f042c8abaa4a917bdd6c56ca4611a39c31526 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:35:17 +0000
Subject: [PATCH 064/200] codegen metadata

---
 .stats.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.stats.yml b/.stats.yml
index ed791f90..be3d4054 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 70
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
 openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 0c94579072c21854f9e042dfaac74e1d
+config_hash: 72d372e69afa63549cdb9df236ac0cbf

From 236c034732a23588201ae4624b0da75b7949618a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:58:23 +0000
Subject: [PATCH 065/200] feat(api): update via SDK Studio

---
 .stats.yml | 4 +-
 README.md | 2 +-
 api.md | 155 ++++----
 src/do_gradientai/_client.py | 2 +-
 .../resources/agents/__init__.py | 56 +++
 src/do_gradientai/resources/agents/agents.py | 128 +++++++
 .../evaluation_datasets.py | 6 +-
 .../resources/agents/evaluation_metrics.py | 145 ++++++++
 .../evaluation_runs.py | 139 +++++--
 .../evaluation_test_cases.py | 14 +-
 src/do_gradientai/resources/regions.py | 195 ++++++++++
 .../resources/regions/__init__.py | 61 ---
 .../regions/evaluation_runs/__init__.py | 33 --
 .../regions/evaluation_runs/results.py | 264 -------------
 .../resources/regions/regions.py | 352 ------------------
 src/do_gradientai/types/__init__.py |
4 - src/do_gradientai/types/agents/__init__.py | 34 ++ .../{ => agents}/api_evaluation_metric.py | 2 +- .../api_evaluation_metric_result.py | 2 +- .../api_evaluation_prompt.py} | 6 +- .../api_evaluation_run.py | 2 +- .../api_evaluation_test_case.py | 2 +- .../{regions => agents}/api_star_metric.py | 0 .../api_star_metric_param.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_response.py} | 6 +- .../evaluation_run_create_params.py | 0 .../evaluation_run_create_response.py | 0 .../evaluation_run_list_results_response.py} | 10 +- .../evaluation_run_retrieve_response.py | 2 +- .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 2 +- .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 src/do_gradientai/types/api_workspace.py | 2 +- src/do_gradientai/types/regions/__init__.py | 32 -- .../types/regions/evaluation_runs/__init__.py | 9 - .../result_retrieve_prompt_response.py | 12 - .../test_evaluation_datasets.py | 34 +- .../agents/test_evaluation_metrics.py | 80 ++++ .../test_evaluation_runs.py | 119 +++++- .../test_evaluation_test_cases.py | 86 ++--- tests/api_resources/regions/__init__.py | 1 - .../regions/evaluation_runs/__init__.py | 1 - .../regions/evaluation_runs/test_results.py | 200 ---------- tests/api_resources/test_regions.py | 58 +-- 53 files changed, 1007 insertions(+), 1255 deletions(-) rename src/do_gradientai/resources/{regions => agents}/evaluation_datasets.py (98%) create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics.py rename src/do_gradientai/resources/{regions/evaluation_runs => agents}/evaluation_runs.py (70%) rename src/do_gradientai/resources/{regions => agents}/evaluation_test_cases.py (97%) create mode 100644 src/do_gradientai/resources/regions.py delete mode 100644 src/do_gradientai/resources/regions/__init__.py delete mode 100644 src/do_gradientai/resources/regions/evaluation_runs/__init__.py delete mode 100644 src/do_gradientai/resources/regions/evaluation_runs/results.py delete mode 100644 src/do_gradientai/resources/regions/regions.py rename src/do_gradientai/types/{ => agents}/api_evaluation_metric.py (95%) rename src/do_gradientai/types/{regions/evaluation_runs => agents}/api_evaluation_metric_result.py (92%) rename src/do_gradientai/types/{regions/evaluation_runs/api_prompt.py => agents/api_evaluation_prompt.py} (90%) rename src/do_gradientai/types/{regions/evaluation_runs => agents}/api_evaluation_run.py (97%) rename src/do_gradientai/types/{regions => agents}/api_evaluation_test_case.py (94%) rename src/do_gradientai/types/{regions => agents}/api_star_metric.py (100%) rename src/do_gradientai/types/{regions => agents}/api_star_metric_param.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_response.py (100%) rename 
src/do_gradientai/types/{region_list_evaluation_metrics_response.py => agents/evaluation_metric_list_response.py} (63%) rename src/do_gradientai/types/{regions => agents}/evaluation_run_create_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_run_create_response.py (100%) rename src/do_gradientai/types/{regions/evaluation_runs/result_retrieve_response.py => agents/evaluation_run_list_results_response.py} (52%) rename src/do_gradientai/types/{regions => agents}/evaluation_run_retrieve_response.py (82%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_create_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_create_response.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_evaluation_runs_response.py (85%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_response.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_retrieve_response.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_update_params.py (100%) rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_update_response.py (100%) delete mode 100644 src/do_gradientai/types/regions/__init__.py delete mode 100644 src/do_gradientai/types/regions/evaluation_runs/__init__.py delete mode 100644 src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py rename tests/api_resources/{regions => agents}/test_evaluation_datasets.py (82%) create mode 100644 tests/api_resources/agents/test_evaluation_metrics.py rename tests/api_resources/{regions => agents}/test_evaluation_runs.py (56%) rename tests/api_resources/{regions => agents}/test_evaluation_test_cases.py (82%) delete mode 100644 tests/api_resources/regions/__init__.py delete mode 100644 tests/api_resources/regions/evaluation_runs/__init__.py delete mode 100644 tests/api_resources/regions/evaluation_runs/test_results.py diff --git a/.stats.yml b/.stats.yml index be3d4054..49720dd2 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 70 +configured_endpoints: 69 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 72d372e69afa63549cdb9df236ac0cbf +config_hash: 190bed33fe275347e4871077b32af63f diff --git a/README.md b/README.md index 64a2af23..87682600 100644 --- a/README.md +++ b/README.md @@ -120,7 +120,7 @@ from do_gradientai import GradientAI client = GradientAI() -evaluation_test_case = client.regions.evaluation_test_cases.create( +evaluation_test_case = client.agents.evaluation_test_cases.create( star_metric={}, ) print(evaluation_test_case.star_metric) diff --git a/api.md b/api.md index a10c03ef..018742d7 100644 --- a/api.md +++ b/api.md @@ -52,6 +52,80 @@ Methods: - client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse - client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +## EvaluationMetrics + +Types: + +```python +from do_gradientai.types.agents import EvaluationMetricListResponse +``` + +Methods: + +- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse + +## EvaluationRuns + +Types: + +```python +from do_gradientai.types.agents 
import ( + APIEvaluationMetric, + APIEvaluationMetricResult, + APIEvaluationPrompt, + APIEvaluationRun, + EvaluationRunCreateResponse, + EvaluationRunRetrieveResponse, + EvaluationRunListResultsResponse, +) +``` + +Methods: + +- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse +- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse + +## EvaluationTestCases + +Types: + +```python +from do_gradientai.types.agents import ( + APIEvaluationTestCase, + APIStarMetric, + EvaluationTestCaseCreateResponse, + EvaluationTestCaseRetrieveResponse, + EvaluationTestCaseUpdateResponse, + EvaluationTestCaseListResponse, + EvaluationTestCaseListEvaluationRunsResponse, +) +``` + +Methods: + +- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.agents.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse + +## EvaluationDatasets + +Types: + +```python +from do_gradientai.types.agents import ( + EvaluationDatasetCreateResponse, + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) +``` + +Methods: + +- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse + ## Functions Types: @@ -176,89 +250,12 @@ Methods: Types: ```python -from do_gradientai.types import ( - APIEvaluationMetric, - RegionListResponse, - RegionListEvaluationMetricsResponse, -) -``` - -Methods: - -- client.regions.list(\*\*params) -> RegionListResponse -- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse - -## EvaluationRuns - -Types: - -```python -from do_gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse -``` - -Methods: - -- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse -- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse - -### Results - -Types: - -```python -from do_gradientai.types.regions.evaluation_runs import ( - APIEvaluationMetricResult, - APIEvaluationRun, - APIPrompt, - ResultRetrieveResponse, - ResultRetrievePromptResponse, -) -``` - -Methods: - -- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse -- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse - -## EvaluationTestCases - -Types: - -```python -from do_gradientai.types.regions import ( - APIEvaluationTestCase, - APIStarMetric, - EvaluationTestCaseCreateResponse, - EvaluationTestCaseRetrieveResponse, - EvaluationTestCaseUpdateResponse, - EvaluationTestCaseListResponse, - EvaluationTestCaseListEvaluationRunsResponse, -) -``` - -Methods: - -- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse -- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse -- 
client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse -- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse -- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse - -## EvaluationDatasets - -Types: - -```python -from do_gradientai.types.regions import ( - EvaluationDatasetCreateResponse, - EvaluationDatasetCreateFileUploadPresignedURLsResponse, -) +from do_gradientai.types import RegionListResponse ``` Methods: -- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse -- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +- client.regions.list(\*\*params) -> RegionListResponse # IndexingJobs diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 71db35bc..8710fe68 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -33,10 +33,10 @@ if TYPE_CHECKING: from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource + from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py index f41a0408..3eb9cde8 100644 --- a/src/do_gradientai/resources/agents/__init__.py +++ b/src/do_gradientai/resources/agents/__init__.py @@ -40,6 +40,14 @@ ChildAgentsResourceWithStreamingResponse, AsyncChildAgentsResourceWithStreamingResponse, ) +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -48,6 +56,30 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) +from .evaluation_metrics import ( + EvaluationMetricsResource, + AsyncEvaluationMetricsResource, + EvaluationMetricsResourceWithRawResponse, + AsyncEvaluationMetricsResourceWithRawResponse, + EvaluationMetricsResourceWithStreamingResponse, + AsyncEvaluationMetricsResourceWithStreamingResponse, +) +from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + 
AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) __all__ = [ "APIKeysResource", @@ -56,6 +88,30 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "EvaluationMetricsResource", + "AsyncEvaluationMetricsResource", + "EvaluationMetricsResourceWithRawResponse", + "AsyncEvaluationMetricsResourceWithRawResponse", + "EvaluationMetricsResourceWithStreamingResponse", + "AsyncEvaluationMetricsResourceWithStreamingResponse", + "EvaluationRunsResource", + "AsyncEvaluationRunsResource", + "EvaluationRunsResourceWithRawResponse", + "AsyncEvaluationRunsResourceWithRawResponse", + "EvaluationRunsResourceWithStreamingResponse", + "AsyncEvaluationRunsResourceWithStreamingResponse", + "EvaluationTestCasesResource", + "AsyncEvaluationTestCasesResource", + "EvaluationTestCasesResourceWithRawResponse", + "AsyncEvaluationTestCasesResourceWithRawResponse", + "EvaluationTestCasesResourceWithStreamingResponse", + "AsyncEvaluationTestCasesResourceWithStreamingResponse", + "EvaluationDatasetsResource", + "AsyncEvaluationDatasetsResource", + "EvaluationDatasetsResourceWithRawResponse", + "AsyncEvaluationDatasetsResourceWithRawResponse", + "EvaluationDatasetsResourceWithStreamingResponse", + "AsyncEvaluationDatasetsResourceWithStreamingResponse", "FunctionsResource", "AsyncFunctionsResource", "FunctionsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py index 63f0c4d4..6bb39894 100644 --- a/src/do_gradientai/resources/agents/agents.py +++ b/src/do_gradientai/resources/agents/agents.py @@ -57,6 +57,14 @@ AsyncChildAgentsResourceWithStreamingResponse, ) from ..._base_client import make_request_options +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -65,6 +73,30 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) +from .evaluation_metrics import ( + EvaluationMetricsResource, + AsyncEvaluationMetricsResource, + EvaluationMetricsResourceWithRawResponse, + AsyncEvaluationMetricsResourceWithRawResponse, + EvaluationMetricsResourceWithStreamingResponse, + AsyncEvaluationMetricsResourceWithStreamingResponse, +) +from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) from ...types.agent_list_response import AgentListResponse from ...types.api_retrieval_method import APIRetrievalMethod from ...types.agent_create_response import AgentCreateResponse @@ -82,6 +114,22 @@ class AgentsResource(SyncAPIResource): def api_keys(self) -> APIKeysResource: return 
APIKeysResource(self._client) + @cached_property + def evaluation_metrics(self) -> EvaluationMetricsResource: + return EvaluationMetricsResource(self._client) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResource: + return EvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResource: + return EvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResource: + return EvaluationDatasetsResource(self._client) + @cached_property def functions(self) -> FunctionsResource: return FunctionsResource(self._client) @@ -450,6 +498,22 @@ class AsyncAgentsResource(AsyncAPIResource): def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) + @cached_property + def evaluation_metrics(self) -> AsyncEvaluationMetricsResource: + return AsyncEvaluationMetricsResource(self._client) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResource: + return AsyncEvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource: + return AsyncEvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource: + return AsyncEvaluationDatasetsResource(self._client) + @cached_property def functions(self) -> AsyncFunctionsResource: return AsyncFunctionsResource(self._client) @@ -840,6 +904,22 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def evaluation_metrics(self) -> EvaluationMetricsResourceWithRawResponse: + return EvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse: + return EvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse: + return EvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse: + return EvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets) + @cached_property def functions(self) -> FunctionsResourceWithRawResponse: return FunctionsResourceWithRawResponse(self._agents.functions) @@ -884,6 +964,22 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithRawResponse: + return AsyncEvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse: + return AsyncEvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: + return AsyncEvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: + return AsyncEvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets) + @cached_property def functions(self) -> 
AsyncFunctionsResourceWithRawResponse: return AsyncFunctionsResourceWithRawResponse(self._agents.functions) @@ -928,6 +1024,22 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def evaluation_metrics(self) -> EvaluationMetricsResourceWithStreamingResponse: + return EvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse: + return EvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse: + return EvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse: + return EvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets) + @cached_property def functions(self) -> FunctionsResourceWithStreamingResponse: return FunctionsResourceWithStreamingResponse(self._agents.functions) @@ -972,6 +1084,22 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse: + return AsyncEvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: + return AsyncEvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse: + return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: + return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets) + @cached_property def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) diff --git a/src/do_gradientai/resources/regions/evaluation_datasets.py b/src/do_gradientai/resources/agents/evaluation_datasets.py similarity index 98% rename from src/do_gradientai/resources/regions/evaluation_datasets.py rename to src/do_gradientai/resources/agents/evaluation_datasets.py index f82e9701..42eca703 100644 --- a/src/do_gradientai/resources/regions/evaluation_datasets.py +++ b/src/do_gradientai/resources/agents/evaluation_datasets.py @@ -17,13 +17,13 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.regions import ( +from ...types.agents import ( evaluation_dataset_create_params, evaluation_dataset_create_file_upload_presigned_urls_params, ) -from ...types.regions.evaluation_dataset_create_response import EvaluationDatasetCreateResponse +from ...types.agents.evaluation_dataset_create_response import EvaluationDatasetCreateResponse from ...types.knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam -from ...types.regions.evaluation_dataset_create_file_upload_presigned_urls_response import ( +from 
...types.agents.evaluation_dataset_create_file_upload_presigned_urls_response import ( EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics.py new file mode 100644 index 00000000..c554be3e --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics.py @@ -0,0 +1,145 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents.evaluation_metric_list_response import EvaluationMetricListResponse + +__all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"] + + +class EvaluationMetricsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationMetricsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationMetricsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return EvaluationMetricsResourceWithStreamingResponse(self) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. + """ + return self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationMetricListResponse, + ) + + +class AsyncEvaluationMetricsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationMetricsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
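For reference, a minimal usage sketch of the relocated resource (an editor's illustration, not part of the generated file; it assumes credentials are picked up from the environment as in the README example, and the `.headers`/`.parse()` accessors follow the usual Stainless raw-response shape):

```python
# Illustrative sketch only; client construction and credentials are assumptions,
# not part of this patch.
from do_gradientai import GradientAI

client = GradientAI()

# Parsed response, typed as EvaluationMetricListResponse:
metrics = client.agents.evaluation_metrics.list()

# Raw response, via the `with_raw_response` prefix described in the docstrings above:
raw = client.agents.evaluation_metrics.with_raw_response.list()
print(raw.headers)
metrics = raw.parse()
```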
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationMetricsResourceWithStreamingResponse(self) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. + """ + return await self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationMetricListResponse, + ) + + +class EvaluationMetricsResourceWithRawResponse: + def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: + self._evaluation_metrics = evaluation_metrics + + self.list = to_raw_response_wrapper( + evaluation_metrics.list, + ) + + +class AsyncEvaluationMetricsResourceWithRawResponse: + def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: + self._evaluation_metrics = evaluation_metrics + + self.list = async_to_raw_response_wrapper( + evaluation_metrics.list, + ) + + +class EvaluationMetricsResourceWithStreamingResponse: + def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: + self._evaluation_metrics = evaluation_metrics + + self.list = to_streamed_response_wrapper( + evaluation_metrics.list, + ) + + +class AsyncEvaluationMetricsResourceWithStreamingResponse: + def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: + self._evaluation_metrics = evaluation_metrics + + self.list = async_to_streamed_response_wrapper( + evaluation_metrics.list, + ) diff --git a/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py similarity index 70% rename from src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py rename to src/do_gradientai/resources/agents/evaluation_runs.py index 9221c45c..7e207e7d 100644 --- a/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py +++ b/src/do_gradientai/resources/agents/evaluation_runs.py @@ -4,37 +4,26 @@ import httpx -from .results import ( - ResultsResource, - AsyncResultsResource, - ResultsResourceWithRawResponse, - AsyncResultsResourceWithRawResponse, - ResultsResourceWithStreamingResponse, - AsyncResultsResourceWithStreamingResponse, -) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ...._base_client 
import make_request_options -from ....types.regions import evaluation_run_create_params -from ....types.regions.evaluation_run_create_response import EvaluationRunCreateResponse -from ....types.regions.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse +from ..._base_client import make_request_options +from ...types.agents import evaluation_run_create_params +from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse +from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse +from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse __all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"] class EvaluationRunsResource(SyncAPIResource): - @cached_property - def results(self) -> ResultsResource: - return ResultsResource(self._client) - @cached_property def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse: """ @@ -140,12 +129,46 @@ def retrieve( cast_to=EvaluationRunRetrieveResponse, ) + def list_results( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunListResultsResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunListResultsResponse, + ) -class AsyncEvaluationRunsResource(AsyncAPIResource): - @cached_property - def results(self) -> AsyncResultsResource: - return AsyncResultsResource(self._client) +class AsyncEvaluationRunsResource(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse: """ @@ -251,6 +274,44 @@ async def retrieve( cast_to=EvaluationRunRetrieveResponse, ) + async def list_results( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunListResultsResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. 
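A call sketch for the new `list_results` method (the run UUID is a placeholder; an empty value raises `ValueError` per the guard above, and client setup is assumed):

```python
# Sketch only; "example-run-uuid" is a placeholder value.
from do_gradientai import GradientAI

client = GradientAI()

results = client.agents.evaluation_runs.list_results("example-run-uuid")
# The async resource defined below mirrors this shape:
#     results = await async_client.agents.evaluation_runs.list_results("example-run-uuid")
```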
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunListResultsResponse, + ) + class EvaluationRunsResourceWithRawResponse: def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: @@ -262,10 +323,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: self.retrieve = to_raw_response_wrapper( evaluation_runs.retrieve, ) - - @cached_property - def results(self) -> ResultsResourceWithRawResponse: - return ResultsResourceWithRawResponse(self._evaluation_runs.results) + self.list_results = to_raw_response_wrapper( + evaluation_runs.list_results, + ) class AsyncEvaluationRunsResourceWithRawResponse: @@ -278,10 +338,9 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: self.retrieve = async_to_raw_response_wrapper( evaluation_runs.retrieve, ) - - @cached_property - def results(self) -> AsyncResultsResourceWithRawResponse: - return AsyncResultsResourceWithRawResponse(self._evaluation_runs.results) + self.list_results = async_to_raw_response_wrapper( + evaluation_runs.list_results, + ) class EvaluationRunsResourceWithStreamingResponse: @@ -294,10 +353,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: self.retrieve = to_streamed_response_wrapper( evaluation_runs.retrieve, ) - - @cached_property - def results(self) -> ResultsResourceWithStreamingResponse: - return ResultsResourceWithStreamingResponse(self._evaluation_runs.results) + self.list_results = to_streamed_response_wrapper( + evaluation_runs.list_results, + ) class AsyncEvaluationRunsResourceWithStreamingResponse: @@ -310,7 +368,6 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: self.retrieve = async_to_streamed_response_wrapper( evaluation_runs.retrieve, ) - - @cached_property - def results(self) -> AsyncResultsResourceWithStreamingResponse: - return AsyncResultsResourceWithStreamingResponse(self._evaluation_runs.results) + self.list_results = async_to_streamed_response_wrapper( + evaluation_runs.list_results, + ) diff --git a/src/do_gradientai/resources/regions/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py similarity index 97% rename from src/do_gradientai/resources/regions/evaluation_test_cases.py rename to src/do_gradientai/resources/agents/evaluation_test_cases.py index eed4d8b4..995df025 100644 --- a/src/do_gradientai/resources/regions/evaluation_test_cases.py +++ b/src/do_gradientai/resources/agents/evaluation_test_cases.py @@ -17,17 +17,17 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.regions import ( +from ...types.agents import ( evaluation_test_case_create_params, evaluation_test_case_update_params, evaluation_test_case_list_evaluation_runs_params, ) -from ...types.regions.api_star_metric_param import 
APIStarMetricParam -from ...types.regions.evaluation_test_case_list_response import EvaluationTestCaseListResponse -from ...types.regions.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse -from ...types.regions.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse -from ...types.regions.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse -from ...types.regions.evaluation_test_case_list_evaluation_runs_response import ( +from ...types.agents.api_star_metric_param import APIStarMetricParam +from ...types.agents.evaluation_test_case_list_response import EvaluationTestCaseListResponse +from ...types.agents.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse +from ...types.agents.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse +from ...types.agents.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse +from ...types.agents.evaluation_test_case_list_evaluation_runs_response import ( EvaluationTestCaseListEvaluationRunsResponse, ) diff --git a/src/do_gradientai/resources/regions.py b/src/do_gradientai/resources/regions.py new file mode 100644 index 00000000..4c50d9e6 --- /dev/null +++ b/src/do_gradientai/resources/regions.py @@ -0,0 +1,195 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..types import region_list_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.region_list_response import RegionListResponse + +__all__ = ["RegionsResource", "AsyncRegionsResource"] + + +class RegionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return RegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return RegionsResourceWithStreamingResponse(self) + + def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. 
+ + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class AsyncRegionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncRegionsResourceWithStreamingResponse(self) + + async def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class RegionsResourceWithRawResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_raw_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithRawResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_raw_response_wrapper( + regions.list, + ) + + +class RegionsResourceWithStreamingResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_streamed_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithStreamingResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_streamed_response_wrapper( + regions.list, + ) diff --git a/src/do_gradientai/resources/regions/__init__.py b/src/do_gradientai/resources/regions/__init__.py deleted file mode 100644 index 51a96d61..00000000 --- a/src/do_gradientai/resources/regions/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
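A usage sketch for the flattened regions resource (editor's illustration, assuming a configured client; both query parameters are optional and, per the `NOT_GIVEN` defaults above, are omitted from the request when unset):

```python
# Sketch only; client construction and credentials are assumptions.
from do_gradientai import GradientAI

client = GradientAI()

# List only datacenters that serve inference:
regions = client.regions.list(serves_inference=True)

# Streaming-response variant, per the `with_streaming_response` docstring above
# (the context-manager shape follows the usual Stainless convention):
with client.regions.with_streaming_response.list() as response:
    print(response.headers)
```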
- -from .regions import ( - RegionsResource, - AsyncRegionsResource, - RegionsResourceWithRawResponse, - AsyncRegionsResourceWithRawResponse, - RegionsResourceWithStreamingResponse, - AsyncRegionsResourceWithStreamingResponse, -) -from .evaluation_runs import ( - EvaluationRunsResource, - AsyncEvaluationRunsResource, - EvaluationRunsResourceWithRawResponse, - AsyncEvaluationRunsResourceWithRawResponse, - EvaluationRunsResourceWithStreamingResponse, - AsyncEvaluationRunsResourceWithStreamingResponse, -) -from .evaluation_datasets import ( - EvaluationDatasetsResource, - AsyncEvaluationDatasetsResource, - EvaluationDatasetsResourceWithRawResponse, - AsyncEvaluationDatasetsResourceWithRawResponse, - EvaluationDatasetsResourceWithStreamingResponse, - AsyncEvaluationDatasetsResourceWithStreamingResponse, -) -from .evaluation_test_cases import ( - EvaluationTestCasesResource, - AsyncEvaluationTestCasesResource, - EvaluationTestCasesResourceWithRawResponse, - AsyncEvaluationTestCasesResourceWithRawResponse, - EvaluationTestCasesResourceWithStreamingResponse, - AsyncEvaluationTestCasesResourceWithStreamingResponse, -) - -__all__ = [ - "EvaluationRunsResource", - "AsyncEvaluationRunsResource", - "EvaluationRunsResourceWithRawResponse", - "AsyncEvaluationRunsResourceWithRawResponse", - "EvaluationRunsResourceWithStreamingResponse", - "AsyncEvaluationRunsResourceWithStreamingResponse", - "EvaluationTestCasesResource", - "AsyncEvaluationTestCasesResource", - "EvaluationTestCasesResourceWithRawResponse", - "AsyncEvaluationTestCasesResourceWithRawResponse", - "EvaluationTestCasesResourceWithStreamingResponse", - "AsyncEvaluationTestCasesResourceWithStreamingResponse", - "EvaluationDatasetsResource", - "AsyncEvaluationDatasetsResource", - "EvaluationDatasetsResourceWithRawResponse", - "AsyncEvaluationDatasetsResourceWithRawResponse", - "EvaluationDatasetsResourceWithStreamingResponse", - "AsyncEvaluationDatasetsResourceWithStreamingResponse", - "RegionsResource", - "AsyncRegionsResource", - "RegionsResourceWithRawResponse", - "AsyncRegionsResourceWithRawResponse", - "RegionsResourceWithStreamingResponse", - "AsyncRegionsResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/regions/evaluation_runs/__init__.py b/src/do_gradientai/resources/regions/evaluation_runs/__init__.py deleted file mode 100644 index e5580dd0..00000000 --- a/src/do_gradientai/resources/regions/evaluation_runs/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .results import ( - ResultsResource, - AsyncResultsResource, - ResultsResourceWithRawResponse, - AsyncResultsResourceWithRawResponse, - ResultsResourceWithStreamingResponse, - AsyncResultsResourceWithStreamingResponse, -) -from .evaluation_runs import ( - EvaluationRunsResource, - AsyncEvaluationRunsResource, - EvaluationRunsResourceWithRawResponse, - AsyncEvaluationRunsResourceWithRawResponse, - EvaluationRunsResourceWithStreamingResponse, - AsyncEvaluationRunsResourceWithStreamingResponse, -) - -__all__ = [ - "ResultsResource", - "AsyncResultsResource", - "ResultsResourceWithRawResponse", - "AsyncResultsResourceWithRawResponse", - "ResultsResourceWithStreamingResponse", - "AsyncResultsResourceWithStreamingResponse", - "EvaluationRunsResource", - "AsyncEvaluationRunsResource", - "EvaluationRunsResourceWithRawResponse", - "AsyncEvaluationRunsResourceWithRawResponse", - "EvaluationRunsResourceWithStreamingResponse", - "AsyncEvaluationRunsResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/regions/evaluation_runs/results.py b/src/do_gradientai/resources/regions/evaluation_runs/results.py deleted file mode 100644 index ad74a778..00000000 --- a/src/do_gradientai/resources/regions/evaluation_runs/results.py +++ /dev/null @@ -1,264 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.regions.evaluation_runs.result_retrieve_response import ResultRetrieveResponse -from ....types.regions.evaluation_runs.result_retrieve_prompt_response import ResultRetrievePromptResponse - -__all__ = ["ResultsResource", "AsyncResultsResource"] - - -class ResultsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ResultsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ResultsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ResultsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ResultsResourceWithStreamingResponse(self) - - def retrieve( - self, - evaluation_run_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResultRetrieveResponse: - """ - To retrieve results of an evaluation run, send a GET request to - `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. 
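For orientation, how calls against the nested resource deleted below map onto the relocated one (a before/after sketch with placeholder values; the per-prompt `retrieve_prompt` endpoint gets no replacement in this patch, which matches `configured_endpoints` dropping from 70 to 69):

```python
# Migration sketch only; client setup and the run UUID are placeholders.
from do_gradientai import GradientAI

client = GradientAI()

# Before this patch (resource deleted below):
#     client.regions.evaluation_runs.results.retrieve("example-run-uuid")
# After this patch (see agents/evaluation_runs.py above):
results = client.agents.evaluation_runs.list_results("example-run-uuid")
```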
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not evaluation_run_uuid: - raise ValueError( - f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" - ) - return self._get( - f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ResultRetrieveResponse, - ) - - def retrieve_prompt( - self, - prompt_id: int, - *, - evaluation_run_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResultRetrievePromptResponse: - """ - To retrieve results of an evaluation run, send a GET request to - `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not evaluation_run_uuid: - raise ValueError( - f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" - ) - return self._get( - f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ResultRetrievePromptResponse, - ) - - -class AsyncResultsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncResultsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncResultsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncResultsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncResultsResourceWithStreamingResponse(self) - - async def retrieve( - self, - evaluation_run_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResultRetrieveResponse: - """ - To retrieve results of an evaluation run, send a GET request to - `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not evaluation_run_uuid: - raise ValueError( - f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" - ) - return await self._get( - f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ResultRetrieveResponse, - ) - - async def retrieve_prompt( - self, - prompt_id: int, - *, - evaluation_run_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ResultRetrievePromptResponse: - """ - To retrieve results of an evaluation run, send a GET request to - `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not evaluation_run_uuid: - raise ValueError( - f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" - ) - return await self._get( - f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ResultRetrievePromptResponse, - ) - - -class ResultsResourceWithRawResponse: - def __init__(self, results: ResultsResource) -> None: - self._results = results - - self.retrieve = to_raw_response_wrapper( - results.retrieve, - ) - self.retrieve_prompt = to_raw_response_wrapper( - results.retrieve_prompt, - ) - - -class AsyncResultsResourceWithRawResponse: - def __init__(self, results: AsyncResultsResource) -> None: - self._results = results - - self.retrieve = async_to_raw_response_wrapper( - results.retrieve, - ) - self.retrieve_prompt = async_to_raw_response_wrapper( - results.retrieve_prompt, - ) - - -class ResultsResourceWithStreamingResponse: - def __init__(self, results: ResultsResource) -> None: - self._results = results - - self.retrieve = to_streamed_response_wrapper( - results.retrieve, - ) - self.retrieve_prompt = to_streamed_response_wrapper( - results.retrieve_prompt, - ) - - -class AsyncResultsResourceWithStreamingResponse: - def __init__(self, results: AsyncResultsResource) -> None: - self._results = results - - self.retrieve = async_to_streamed_response_wrapper( - results.retrieve, - ) - self.retrieve_prompt = async_to_streamed_response_wrapper( - results.retrieve_prompt, - ) diff --git a/src/do_gradientai/resources/regions/regions.py b/src/do_gradientai/resources/regions/regions.py deleted file mode 100644 index 5f74b2e8..00000000 --- a/src/do_gradientai/resources/regions/regions.py +++ /dev/null @@ -1,352 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
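The run-results endpoints deleted above do not leave the SDK surface: the renamed tests later in this patch exercise them as `list_results` on `agents.evaluation_runs`. A minimal usage sketch of the relocated call follows; the client construction and the placeholder UUID are illustrative assumptions, while the method name and response fields are taken from this patch.

from do_gradientai import GradientAI

client = GradientAI()  # assumption: credentials are picked up from the environment

# Run-level and prompt-level results now come back from one call on the agents
# namespace instead of the deleted regions.evaluation_runs.results resource.
results = client.agents.evaluation_runs.list_results(
    "evaluation_run_uuid",  # placeholder run UUID
)
print(results.evaluation_run)  # Optional[APIEvaluationRun]
print(results.prompts)  # Optional[List[APIEvaluationPrompt]], the prompt-level results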
- -from __future__ import annotations - -import httpx - -from ...types import region_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from .evaluation_datasets import ( - EvaluationDatasetsResource, - AsyncEvaluationDatasetsResource, - EvaluationDatasetsResourceWithRawResponse, - AsyncEvaluationDatasetsResourceWithRawResponse, - EvaluationDatasetsResourceWithStreamingResponse, - AsyncEvaluationDatasetsResourceWithStreamingResponse, -) -from .evaluation_test_cases import ( - EvaluationTestCasesResource, - AsyncEvaluationTestCasesResource, - EvaluationTestCasesResourceWithRawResponse, - AsyncEvaluationTestCasesResourceWithRawResponse, - EvaluationTestCasesResourceWithStreamingResponse, - AsyncEvaluationTestCasesResourceWithStreamingResponse, -) -from ...types.region_list_response import RegionListResponse -from .evaluation_runs.evaluation_runs import ( - EvaluationRunsResource, - AsyncEvaluationRunsResource, - EvaluationRunsResourceWithRawResponse, - AsyncEvaluationRunsResourceWithRawResponse, - EvaluationRunsResourceWithStreamingResponse, - AsyncEvaluationRunsResourceWithStreamingResponse, -) -from ...types.region_list_evaluation_metrics_response import RegionListEvaluationMetricsResponse - -__all__ = ["RegionsResource", "AsyncRegionsResource"] - - -class RegionsResource(SyncAPIResource): - @cached_property - def evaluation_runs(self) -> EvaluationRunsResource: - return EvaluationRunsResource(self._client) - - @cached_property - def evaluation_test_cases(self) -> EvaluationTestCasesResource: - return EvaluationTestCasesResource(self._client) - - @cached_property - def evaluation_datasets(self) -> EvaluationDatasetsResource: - return EvaluationDatasetsResource(self._client) - - @cached_property - def with_raw_response(self) -> RegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return RegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return RegionsResourceWithStreamingResponse(self) - - def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. 
- - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - def list_evaluation_metrics( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListEvaluationMetricsResponse: - """ - To list all evaluation metrics, send a GET request to - `/v2/gen-ai/evaluation_metrics`. - """ - return self._get( - "/v2/gen-ai/evaluation_metrics" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RegionListEvaluationMetricsResponse, - ) - - -class AsyncRegionsResource(AsyncAPIResource): - @cached_property - def evaluation_runs(self) -> AsyncEvaluationRunsResource: - return AsyncEvaluationRunsResource(self._client) - - @cached_property - def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource: - return AsyncEvaluationTestCasesResource(self._client) - - @cached_property - def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource: - return AsyncEvaluationDatasetsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncRegionsResourceWithStreamingResponse(self) - - async def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - async def list_evaluation_metrics( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListEvaluationMetricsResponse: - """ - To list all evaluation metrics, send a GET request to - `/v2/gen-ai/evaluation_metrics`. - """ - return await self._get( - "/v2/gen-ai/evaluation_metrics" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=RegionListEvaluationMetricsResponse, - ) - - -class RegionsResourceWithRawResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_raw_response_wrapper( - regions.list, - ) - self.list_evaluation_metrics = to_raw_response_wrapper( - regions.list_evaluation_metrics, - ) - - @cached_property - def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse: - return EvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) - - @cached_property - def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse: - return EvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) - - @cached_property - def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse: - return EvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) - - -class AsyncRegionsResourceWithRawResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_raw_response_wrapper( - regions.list, - ) - self.list_evaluation_metrics = async_to_raw_response_wrapper( - regions.list_evaluation_metrics, - ) - - @cached_property - def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse: - return AsyncEvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) - - @cached_property - def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: - return 
AsyncEvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) - - @cached_property - def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: - return AsyncEvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) - - -class RegionsResourceWithStreamingResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_streamed_response_wrapper( - regions.list, - ) - self.list_evaluation_metrics = to_streamed_response_wrapper( - regions.list_evaluation_metrics, - ) - - @cached_property - def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse: - return EvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) - - @cached_property - def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse: - return EvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) - - @cached_property - def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse: - return EvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) - - -class AsyncRegionsResourceWithStreamingResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_streamed_response_wrapper( - regions.list, - ) - self.list_evaluation_metrics = async_to_streamed_response_wrapper( - regions.list_evaluation_metrics, - ) - - @cached_property - def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: - return AsyncEvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) - - @cached_property - def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse: - return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) - - @cached_property - def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: - return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index d09aaa2a..e3c2ab9c 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -22,7 +22,6 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo @@ -45,9 +44,6 @@ from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .region_list_evaluation_metrics_response import ( - RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, -) from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, ) diff --git 
a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py index aae0ee6b..7e100741 100644 --- a/src/do_gradientai/types/agents/__init__.py +++ b/src/do_gradientai/types/agents/__init__.py @@ -4,11 +4,16 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks +from .api_star_metric import APIStarMetric as APIStarMetric +from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun from .api_key_list_params import APIKeyListParams as APIKeyListParams from .version_list_params import VersionListParams as VersionListParams +from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric +from .api_evaluation_prompt import APIEvaluationPrompt as APIEvaluationPrompt from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams @@ -18,6 +23,7 @@ from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse @@ -27,5 +33,33 @@ from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse +from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult +from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput +from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse +from .evaluation_metric_list_response import EvaluationMetricListResponse as EvaluationMetricListResponse +from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams +from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse +from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse +from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams +from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse +from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams +from .evaluation_run_list_results_response import 
EvaluationRunListResultsResponse as EvaluationRunListResultsResponse +from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse +from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse +from .evaluation_test_case_retrieve_response import ( + EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, +) +from .evaluation_test_case_list_evaluation_runs_params import ( + EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams, +) +from .evaluation_test_case_list_evaluation_runs_response import ( + EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse, +) +from .evaluation_dataset_create_file_upload_presigned_urls_params import ( + EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams, +) +from .evaluation_dataset_create_file_upload_presigned_urls_response import ( + EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) diff --git a/src/do_gradientai/types/api_evaluation_metric.py b/src/do_gradientai/types/agents/api_evaluation_metric.py similarity index 95% rename from src/do_gradientai/types/api_evaluation_metric.py rename to src/do_gradientai/types/agents/api_evaluation_metric.py index 05390297..1aa85306 100644 --- a/src/do_gradientai/types/api_evaluation_metric.py +++ b/src/do_gradientai/types/agents/api_evaluation_metric.py @@ -3,7 +3,7 @@ from typing import Optional from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIEvaluationMetric"] diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/do_gradientai/types/agents/api_evaluation_metric_result.py similarity index 92% rename from src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py rename to src/do_gradientai/types/agents/api_evaluation_metric_result.py index cb50fd80..35146c00 100644 --- a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py +++ b/src/do_gradientai/types/agents/api_evaluation_metric_result.py @@ -2,7 +2,7 @@ from typing import Optional -from ...._models import BaseModel +from ..._models import BaseModel __all__ = ["APIEvaluationMetricResult"] diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py b/src/do_gradientai/types/agents/api_evaluation_prompt.py similarity index 90% rename from src/do_gradientai/types/regions/evaluation_runs/api_prompt.py rename to src/do_gradientai/types/agents/api_evaluation_prompt.py index fb5a51f4..750e62fb 100644 --- a/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py +++ b/src/do_gradientai/types/agents/api_evaluation_prompt.py @@ -2,10 +2,10 @@ from typing import List, Optional -from ...._models import BaseModel +from ..._models import BaseModel from .api_evaluation_metric_result import APIEvaluationMetricResult -__all__ = ["APIPrompt", "PromptChunk"] +__all__ = ["APIEvaluationPrompt", "PromptChunk"] class PromptChunk(BaseModel): @@ -25,7 +25,7 @@ class PromptChunk(BaseModel): """Text content of the chunk.""" -class APIPrompt(BaseModel): +class APIEvaluationPrompt(BaseModel): ground_truth: Optional[str] = None """The ground truth for the prompt.""" diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py 
b/src/do_gradientai/types/agents/api_evaluation_run.py similarity index 97% rename from src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py rename to src/do_gradientai/types/agents/api_evaluation_run.py index 7822f53c..ae046d3e 100644 --- a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py +++ b/src/do_gradientai/types/agents/api_evaluation_run.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from ...._models import BaseModel +from ..._models import BaseModel from .api_evaluation_metric_result import APIEvaluationMetricResult __all__ = ["APIEvaluationRun"] diff --git a/src/do_gradientai/types/regions/api_evaluation_test_case.py b/src/do_gradientai/types/agents/api_evaluation_test_case.py similarity index 94% rename from src/do_gradientai/types/regions/api_evaluation_test_case.py rename to src/do_gradientai/types/agents/api_evaluation_test_case.py index d799b0e0..09ce5e48 100644 --- a/src/do_gradientai/types/regions/api_evaluation_test_case.py +++ b/src/do_gradientai/types/agents/api_evaluation_test_case.py @@ -5,7 +5,7 @@ from ..._models import BaseModel from .api_star_metric import APIStarMetric -from ..api_evaluation_metric import APIEvaluationMetric +from .api_evaluation_metric import APIEvaluationMetric __all__ = ["APIEvaluationTestCase"] diff --git a/src/do_gradientai/types/regions/api_star_metric.py b/src/do_gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/do_gradientai/types/regions/api_star_metric.py rename to src/do_gradientai/types/agents/api_star_metric.py diff --git a/src/do_gradientai/types/regions/api_star_metric_param.py b/src/do_gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/do_gradientai/types/regions/api_star_metric_param.py rename to src/do_gradientai/types/agents/api_star_metric_param.py diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_dataset_create_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_dataset_create_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_response.py diff --git a/src/do_gradientai/types/region_list_evaluation_metrics_response.py 
b/src/do_gradientai/types/agents/evaluation_metric_list_response.py similarity index 63% rename from src/do_gradientai/types/region_list_evaluation_metrics_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_response.py index c57b71d1..0708f1ba 100644 --- a/src/do_gradientai/types/region_list_evaluation_metrics_response.py +++ b/src/do_gradientai/types/agents/evaluation_metric_list_response.py @@ -2,11 +2,11 @@ from typing import List, Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_evaluation_metric import APIEvaluationMetric -__all__ = ["RegionListEvaluationMetricsResponse"] +__all__ = ["EvaluationMetricListResponse"] -class RegionListEvaluationMetricsResponse(BaseModel): +class EvaluationMetricListResponse(BaseModel): metrics: Optional[List[APIEvaluationMetric]] = None diff --git a/src/do_gradientai/types/regions/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_run_create_params.py rename to src/do_gradientai/types/agents/evaluation_run_create_params.py diff --git a/src/do_gradientai/types/regions/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_run_create_response.py rename to src/do_gradientai/types/agents/evaluation_run_create_response.py diff --git a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py similarity index 52% rename from src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_run_list_results_response.py index 27256353..f0a9882b 100644 --- a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py +++ b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py @@ -2,15 +2,15 @@ from typing import List, Optional -from ...._models import BaseModel -from .api_prompt import APIPrompt +from ..._models import BaseModel from .api_evaluation_run import APIEvaluationRun +from .api_evaluation_prompt import APIEvaluationPrompt -__all__ = ["ResultRetrieveResponse"] +__all__ = ["EvaluationRunListResultsResponse"] -class ResultRetrieveResponse(BaseModel): +class EvaluationRunListResultsResponse(BaseModel): evaluation_run: Optional[APIEvaluationRun] = None - prompts: Optional[List[APIPrompt]] = None + prompts: Optional[List[APIEvaluationPrompt]] = None """The prompt level results.""" diff --git a/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py similarity index 82% rename from src/do_gradientai/types/regions/evaluation_run_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_run_retrieve_response.py index 68d71978..cedba220 100644 --- a/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py +++ b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py @@ -3,7 +3,7 @@ from typing import Optional from ..._models import BaseModel -from .evaluation_runs.api_evaluation_run import APIEvaluationRun +from .api_evaluation_run import APIEvaluationRun __all__ = ["EvaluationRunRetrieveResponse"] diff --git a/src/do_gradientai/types/regions/evaluation_test_case_create_params.py b/src/do_gradientai/types/agents/evaluation_test_case_create_params.py similarity 
index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_create_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_params.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_create_response.py b/src/do_gradientai/types/agents/evaluation_test_case_create_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_create_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_response.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py similarity index 85% rename from src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py index 4233d0ec..d9565e97 100644 --- a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py +++ b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py @@ -3,7 +3,7 @@ from typing import List, Optional from ..._models import BaseModel -from .evaluation_runs.api_evaluation_run import APIEvaluationRun +from .api_evaluation_run import APIEvaluationRun __all__ = ["EvaluationTestCaseListEvaluationRunsResponse"] diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_list_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_response.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_update_params.py b/src/do_gradientai/types/agents/evaluation_test_case_update_params.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_update_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_params.py diff --git a/src/do_gradientai/types/regions/evaluation_test_case_update_response.py b/src/do_gradientai/types/agents/evaluation_test_case_update_response.py similarity index 100% rename from src/do_gradientai/types/regions/evaluation_test_case_update_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_response.py diff --git a/src/do_gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py index b170d504..83e59379 100644 --- a/src/do_gradientai/types/api_workspace.py +++ b/src/do_gradientai/types/api_workspace.py @@ -6,7 +6,7 @@ from datetime import datetime from .._models import BaseModel -from .regions.api_evaluation_test_case import APIEvaluationTestCase +from 
.agents.api_evaluation_test_case import APIEvaluationTestCase __all__ = ["APIWorkspace"] diff --git a/src/do_gradientai/types/regions/__init__.py b/src/do_gradientai/types/regions/__init__.py deleted file mode 100644 index 695ba3b4..00000000 --- a/src/do_gradientai/types/regions/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_star_metric import APIStarMetric as APIStarMetric -from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam -from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase -from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams -from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse -from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams -from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse -from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse -from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams -from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse -from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams -from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse -from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse -from .evaluation_test_case_retrieve_response import ( - EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, -) -from .evaluation_test_case_list_evaluation_runs_params import ( - EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams, -) -from .evaluation_test_case_list_evaluation_runs_response import ( - EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse, -) -from .evaluation_dataset_create_file_upload_presigned_urls_params import ( - EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams, -) -from .evaluation_dataset_create_file_upload_presigned_urls_response import ( - EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse, -) diff --git a/src/do_gradientai/types/regions/evaluation_runs/__init__.py b/src/do_gradientai/types/regions/evaluation_runs/__init__.py deleted file mode 100644 index 0ec4f2f6..00000000 --- a/src/do_gradientai/types/regions/evaluation_runs/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
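With the `types/regions` package deleted below, every evaluation model resolves from the agents namespace instead. The renames in this patch, expressed as an import sketch that matches the new `types/agents/__init__.py` exports above:

from do_gradientai.types.agents import (
    APIEvaluationRun,  # moved from types/regions/evaluation_runs/
    APIEvaluationPrompt,  # renamed from APIPrompt in api_prompt.py
    APIEvaluationMetricResult,
    EvaluationRunListResultsResponse,  # renamed from ResultRetrieveResponse
)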
- -from __future__ import annotations - -from .api_prompt import APIPrompt as APIPrompt -from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun -from .result_retrieve_response import ResultRetrieveResponse as ResultRetrieveResponse -from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult -from .result_retrieve_prompt_response import ResultRetrievePromptResponse as ResultRetrievePromptResponse diff --git a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py deleted file mode 100644 index ebebee48..00000000 --- a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from .api_prompt import APIPrompt - -__all__ = ["ResultRetrievePromptResponse"] - - -class ResultRetrievePromptResponse(BaseModel): - prompt: Optional[APIPrompt] = None diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py similarity index 82% rename from tests/api_resources/regions/test_evaluation_datasets.py rename to tests/api_resources/agents/test_evaluation_datasets.py index 6e7a5e52..9e6dad52 100644 --- a/tests/api_resources/regions/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.regions import ( +from do_gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) @@ -23,13 +23,13 @@ class TestEvaluationDatasets: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - evaluation_dataset = client.regions.evaluation_datasets.create() + evaluation_dataset = client.agents.evaluation_datasets.create() assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - evaluation_dataset = client.regions.evaluation_datasets.create( + evaluation_dataset = client.agents.evaluation_datasets.create( file_upload_dataset={ "original_file_name": "original_file_name", "size_in_bytes": "size_in_bytes", @@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.regions.evaluation_datasets.with_raw_response.create() + response = client.agents.evaluation_datasets.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.regions.evaluation_datasets.with_streaming_response.create() as response: + with client.agents.evaluation_datasets.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> None: - evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls() + evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls() assert_matches_type( EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] ) @@ -72,7 +72,7 @@ def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize def test_method_create_file_upload_presigned_urls_with_all_params(self, client: GradientAI) -> None: - evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls( + evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { "file_name": "file_name", @@ -87,7 +87,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: @pytest.mark.skip() @parametrize def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: - response = client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + response = client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -99,7 +99,7 @@ def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI @pytest.mark.skip() @parametrize def test_streaming_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: - with client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response: + with client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -119,13 +119,13 @@ class TestAsyncEvaluationDatasets: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - evaluation_dataset = await async_client.regions.evaluation_datasets.create() + evaluation_dataset = await async_client.agents.evaluation_datasets.create() assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_dataset = await async_client.regions.evaluation_datasets.create( + evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ "original_file_name": "original_file_name", "size_in_bytes": "size_in_bytes", @@ -138,7 +138,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_datasets.with_raw_response.create() + response = await async_client.agents.evaluation_datasets.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -148,7 +148,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.regions.evaluation_datasets.with_streaming_response.create() as response: + async with async_client.agents.evaluation_datasets.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -160,7 +160,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: - evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls() + evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls() assert_matches_type( EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] ) @@ -170,7 +170,7 @@ async def test_method_create_file_upload_presigned_urls(self, async_client: Asyn async def test_method_create_file_upload_presigned_urls_with_all_params( self, async_client: AsyncGradientAI ) -> None: - evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls( + evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { "file_name": "file_name", @@ -185,7 +185,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params( @pytest.mark.skip() @parametrize async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + response = await async_client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -198,7 +198,7 @@ async def test_raw_response_create_file_upload_presigned_urls(self, async_client @parametrize async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: async with ( - async_client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() + async_client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py new file mode 100644 index 00000000..82084f61 --- /dev/null +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -0,0 +1,80 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
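This new test module covers `agents.evaluation_metrics.list()`, which replaces the deleted `regions.list_evaluation_metrics()` wrapper over `GET /v2/gen-ai/evaluation_metrics`. A sync and async usage sketch for reference; client construction is an illustrative assumption, while the method and response shape come from this patch.

import asyncio

from do_gradientai import GradientAI, AsyncGradientAI

# Assumption: credentials and base URL come from the environment, as in the fixtures.
metrics = GradientAI().agents.evaluation_metrics.list()
print(metrics.metrics)  # Optional[List[APIEvaluationMetric]]


async def main() -> None:
    async_metrics = await AsyncGradientAI().agents.evaluation_metrics.list()
    print(async_metrics.metrics)


asyncio.run(main())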
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import EvaluationMetricListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationMetrics: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + evaluation_metric = client.agents.evaluation_metrics.list() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_metric = response.parse() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_metric = response.parse() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncEvaluationMetrics: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + evaluation_metric = await async_client.agents.evaluation_metrics.list() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py similarity index 56% rename from tests/api_resources/regions/test_evaluation_runs.py rename to tests/api_resources/agents/test_evaluation_runs.py index 09bf8525..721be2a0 100644 --- a/tests/api_resources/regions/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -9,9 +9,10 @@ from tests.utils import assert_matches_type from 
do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.regions import ( +from do_gradientai.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, + EvaluationRunListResultsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -23,13 +24,13 @@ class TestEvaluationRuns: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - evaluation_run = client.regions.evaluation_runs.create() + evaluation_run = client.agents.evaluation_runs.create() assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - evaluation_run = client.regions.evaluation_runs.create( + evaluation_run = client.agents.evaluation_runs.create( agent_uuid="agent_uuid", run_name="run_name", test_case_uuid="test_case_uuid", @@ -39,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.regions.evaluation_runs.with_raw_response.create() + response = client.agents.evaluation_runs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -49,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.regions.evaluation_runs.with_streaming_response.create() as response: + with client.agents.evaluation_runs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -61,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - evaluation_run = client.regions.evaluation_runs.retrieve( + evaluation_run = client.agents.evaluation_runs.retrieve( "evaluation_run_uuid", ) assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) @@ -69,7 +70,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.regions.evaluation_runs.with_raw_response.retrieve( + response = client.agents.evaluation_runs.with_raw_response.retrieve( "evaluation_run_uuid", ) @@ -81,7 +82,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.regions.evaluation_runs.with_streaming_response.retrieve( + with client.agents.evaluation_runs.with_streaming_response.retrieve( "evaluation_run_uuid", ) as response: assert not response.is_closed @@ -96,7 +97,49 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): - client.regions.evaluation_runs.with_raw_response.retrieve( + client.agents.evaluation_runs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_results(self, client: GradientAI) -> None: + evaluation_run = 
client.agents.evaluation_runs.list_results( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_results(self, client: GradientAI) -> None: + response = client.agents.evaluation_runs.with_raw_response.list_results( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_results(self, client: GradientAI) -> None: + with client.agents.evaluation_runs.with_streaming_response.list_results( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_results(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.agents.evaluation_runs.with_raw_response.list_results( "", ) @@ -109,13 +152,13 @@ class TestAsyncEvaluationRuns: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - evaluation_run = await async_client.regions.evaluation_runs.create() + evaluation_run = await async_client.agents.evaluation_runs.create() assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_run = await async_client.regions.evaluation_runs.create( + evaluation_run = await async_client.agents.evaluation_runs.create( agent_uuid="agent_uuid", run_name="run_name", test_case_uuid="test_case_uuid", @@ -125,7 +168,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_runs.with_raw_response.create() + response = await async_client.agents.evaluation_runs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -135,7 +178,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_runs.with_streaming_response.create() as response: + async with async_client.agents.evaluation_runs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -147,7 +190,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - evaluation_run = await async_client.regions.evaluation_runs.retrieve( + evaluation_run = await async_client.agents.evaluation_runs.retrieve( "evaluation_run_uuid", ) 
assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) @@ -155,7 +198,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_runs.with_raw_response.retrieve( + response = await async_client.agents.evaluation_runs.with_raw_response.retrieve( "evaluation_run_uuid", ) @@ -167,7 +210,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_runs.with_streaming_response.retrieve( + async with async_client.agents.evaluation_runs.with_streaming_response.retrieve( "evaluation_run_uuid", ) as response: assert not response.is_closed @@ -182,6 +225,48 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): - await async_client.regions.evaluation_runs.with_raw_response.retrieve( + await async_client.agents.evaluation_runs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_results(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.agents.evaluation_runs.list_results( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_runs.with_raw_response.list_results( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_runs.with_streaming_response.list_results( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.agents.evaluation_runs.with_raw_response.list_results( "", ) diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py similarity index 82% rename from tests/api_resources/regions/test_evaluation_test_cases.py rename to tests/api_resources/agents/test_evaluation_test_cases.py index 7cc18835..50b285bd 100644 --- a/tests/api_resources/regions/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -9,7 +9,7 
+9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.regions import ( +from do_gradientai.types.agents import ( EvaluationTestCaseListResponse, EvaluationTestCaseCreateResponse, EvaluationTestCaseUpdateResponse, @@ -26,13 +26,13 @@ class TestEvaluationTestCases: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.create() + evaluation_test_case = client.agents.evaluation_test_cases.create() assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.create( + evaluation_test_case = client.agents.evaluation_test_cases.create( dataset_uuid="dataset_uuid", description="description", metrics=["string"], @@ -49,7 +49,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.regions.evaluation_test_cases.with_raw_response.create() + response = client.agents.evaluation_test_cases.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.regions.evaluation_test_cases.with_streaming_response.create() as response: + with client.agents.evaluation_test_cases.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -71,7 +71,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.retrieve( + evaluation_test_case = client.agents.evaluation_test_cases.retrieve( "test_case_uuid", ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -79,7 +79,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.regions.evaluation_test_cases.with_raw_response.retrieve( + response = client.agents.evaluation_test_cases.with_raw_response.retrieve( "test_case_uuid", ) @@ -91,7 +91,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.regions.evaluation_test_cases.with_streaming_response.retrieve( + with client.agents.evaluation_test_cases.with_streaming_response.retrieve( "test_case_uuid", ) as response: assert not response.is_closed @@ -106,14 +106,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): - client.regions.evaluation_test_cases.with_raw_response.retrieve( + client.agents.evaluation_test_cases.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize
def test_method_update(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.update( + evaluation_test_case = client.agents.evaluation_test_cases.update( path_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -121,7 +121,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.update( + evaluation_test_case = client.agents.evaluation_test_cases.update( path_test_case_uuid="test_case_uuid", dataset_uuid="dataset_uuid", description="description", @@ -139,7 +139,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.regions.evaluation_test_cases.with_raw_response.update( + response = client.agents.evaluation_test_cases.with_raw_response.update( path_test_case_uuid="test_case_uuid", ) @@ -151,7 +151,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.regions.evaluation_test_cases.with_streaming_response.update( + with client.agents.evaluation_test_cases.with_streaming_response.update( path_test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed @@ -166,20 +166,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): - client.regions.evaluation_test_cases.with_raw_response.update( + client.agents.evaluation_test_cases.with_raw_response.update( path_test_case_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.list() + evaluation_test_case = client.agents.evaluation_test_cases.list() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.regions.evaluation_test_cases.with_raw_response.list() + response = client.agents.evaluation_test_cases.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -189,7 +189,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.regions.evaluation_test_cases.with_streaming_response.list() as response: + with client.agents.evaluation_test_cases.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -201,7 +201,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_evaluation_runs(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", ) 
assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -209,7 +209,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: - evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", evaluation_test_case_version=0, ) @@ -218,7 +218,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) - @pytest.mark.skip() @parametrize def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: - response = client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", ) @@ -230,7 +230,7 @@ def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: - with client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_path_params_list_evaluation_runs(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" ): - client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( evaluation_test_case_uuid="", ) @@ -260,13 +260,13 @@ class TestAsyncEvaluationTestCases: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.create() + evaluation_test_case = await async_client.agents.evaluation_test_cases.create() assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.create( + evaluation_test_case = await async_client.agents.evaluation_test_cases.create( dataset_uuid="dataset_uuid", description="description", metrics=["string"], @@ -283,7 +283,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_test_cases.with_raw_response.create() + response = await async_client.agents.evaluation_test_cases.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -293,7 +293,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_test_cases.with_streaming_response.create() as response: + async with 
async_client.agents.evaluation_test_cases.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -305,7 +305,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.retrieve( + evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( "test_case_uuid", ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -313,7 +313,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( "test_case_uuid", ) @@ -325,7 +325,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_test_cases.with_streaming_response.retrieve( + async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( "test_case_uuid", ) as response: assert not response.is_closed @@ -340,14 +340,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): - await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.update( + evaluation_test_case = await async_client.agents.evaluation_test_cases.update( path_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -355,7 +355,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.update( + evaluation_test_case = await async_client.agents.evaluation_test_cases.update( path_test_case_uuid="test_case_uuid", dataset_uuid="dataset_uuid", description="description", @@ -373,7 +373,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_test_cases.with_raw_response.update( + response = await async_client.agents.evaluation_test_cases.with_raw_response.update( path_test_case_uuid="test_case_uuid", ) @@ -385,7 +385,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) 
-> None: - async with async_client.regions.evaluation_test_cases.with_streaming_response.update( + async with async_client.agents.evaluation_test_cases.with_streaming_response.update( path_test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed @@ -400,20 +400,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): - await async_client.regions.evaluation_test_cases.with_raw_response.update( + await async_client.agents.evaluation_test_cases.with_raw_response.update( path_test_case_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.list() + evaluation_test_case = await async_client.agents.evaluation_test_cases.list() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_test_cases.with_raw_response.list() + response = await async_client.agents.evaluation_test_cases.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -423,7 +423,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_test_cases.with_streaming_response.list() as response: + async with async_client.agents.evaluation_test_cases.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -435,7 +435,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -443,7 +443,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", evaluation_test_case_version=0, ) @@ -452,7 +452,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A @pytest.mark.skip() @parametrize async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( 
evaluation_test_case_uuid="evaluation_test_case_uuid", ) @@ -464,7 +464,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie @pytest.mark.skip() @parametrize async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( evaluation_test_case_uuid="evaluation_test_case_uuid", ) as response: assert not response.is_closed @@ -481,6 +481,6 @@ async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradien with pytest.raises( ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" ): - await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( evaluation_test_case_uuid="", ) diff --git a/tests/api_resources/regions/__init__.py b/tests/api_resources/regions/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/regions/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/regions/evaluation_runs/__init__.py b/tests/api_resources/regions/evaluation_runs/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/regions/evaluation_runs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py deleted file mode 100644 index e4b906bd..00000000 --- a/tests/api_resources/regions/evaluation_runs/test_results.py +++ /dev/null @@ -1,200 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestResults: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - result = client.regions.evaluation_runs.results.retrieve( - "evaluation_run_uuid", - ) - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.regions.evaluation_runs.results.with_raw_response.retrieve( - "evaluation_run_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - result = response.parse() - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.regions.evaluation_runs.results.with_streaming_response.retrieve( - "evaluation_run_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - result = response.parse() - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): - client.regions.evaluation_runs.results.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_prompt(self, client: GradientAI) -> None: - result = client.regions.evaluation_runs.results.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_prompt(self, client: GradientAI) -> None: - response = client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - result = response.parse() - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_prompt(self, client: GradientAI) -> None: - with client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - result = response.parse() - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_prompt(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`evaluation_run_uuid` but received ''"): - client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="", - ) - - -class TestAsyncResults: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - result = await async_client.regions.evaluation_runs.results.retrieve( - "evaluation_run_uuid", - ) - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( - "evaluation_run_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - result = await response.parse() - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve( - "evaluation_run_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - result = await response.parse() - assert_matches_type(ResultRetrieveResponse, result, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): - await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: - result = await async_client.regions.evaluation_runs.results.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - result = await response.parse() - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - result = await response.parse() - assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): - await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( - prompt_id=0, - evaluation_run_uuid="", - ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index bf51ef96..4ed5bb27 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse +from do_gradientai.types import RegionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -54,34 +54,6 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() - @parametrize - def test_method_list_evaluation_metrics(self, client: GradientAI) -> None: - region = client.regions.list_evaluation_metrics() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_evaluation_metrics(self, client: GradientAI) -> None: - response = client.regions.with_raw_response.list_evaluation_metrics() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = response.parse() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_evaluation_metrics(self, client: GradientAI) -> None: - with client.regions.with_streaming_response.list_evaluation_metrics() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = response.parse() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - assert cast(Any, response.is_closed) is True - class TestAsyncRegions: parametrize = pytest.mark.parametrize( @@ -124,31 +96,3 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(RegionListResponse, region, path=["response"]) assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: - region = await async_client.regions.list_evaluation_metrics() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: - response = await async_client.regions.with_raw_response.list_evaluation_metrics() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = await response.parse() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: - async with async_client.regions.with_streaming_response.list_evaluation_metrics() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = await response.parse() - assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) - - 
assert cast(Any, response.is_closed) is True From a13595be232d8599e4c61f398cf6b9bc4f52a160 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 03:21:34 +0000 Subject: [PATCH 066/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 13 - .../resources/inference/__init__.py | 14 -- .../resources/inference/inference.py | 32 --- .../resources/inference/models.py | 226 ------------------ src/do_gradientai/types/inference/__init__.py | 2 - src/do_gradientai/types/inference/model.py | 21 -- .../types/inference/model_list_response.py | 15 -- tests/api_resources/inference/test_models.py | 164 ------------- 9 files changed, 2 insertions(+), 489 deletions(-) delete mode 100644 src/do_gradientai/resources/inference/models.py delete mode 100644 src/do_gradientai/types/inference/model.py delete mode 100644 src/do_gradientai/types/inference/model_list_response.py delete mode 100644 tests/api_resources/inference/test_models.py diff --git a/.stats.yml b/.stats.yml index 49720dd2..cb95a5af 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 69 +configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 190bed33fe275347e4871077b32af63f +config_hash: 886acf2e0eda98b9a718598587f7f81e diff --git a/api.md b/api.md index 018742d7..a7297098 100644 --- a/api.md +++ b/api.md @@ -365,19 +365,6 @@ Methods: - client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse - client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse -## Models - -Types: - -```python -from do_gradientai.types.inference import Model, ModelListResponse -``` - -Methods: - -- client.inference.models.retrieve(model) -> Model -- client.inference.models.list() -> ModelListResponse - # Models Types: diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py index 0e5631ce..21798ab2 100644 --- a/src/do_gradientai/resources/inference/__init__.py +++ b/src/do_gradientai/resources/inference/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -32,12 +24,6 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", "InferenceResource", "AsyncInferenceResource", "InferenceResourceWithRawResponse", diff --git a/src/do_gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py index 209d6f17..a144bae0 100644 --- a/src/do_gradientai/resources/inference/inference.py +++ b/src/do_gradientai/resources/inference/inference.py @@ -2,14 +2,6 @@ from __future__ import annotations -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -29,10 +21,6 @@ class InferenceResource(SyncAPIResource): def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) - @cached_property - def models(self) -> ModelsResource: - return ModelsResource(self._client) - @cached_property def with_raw_response(self) -> InferenceResourceWithRawResponse: """ @@ -58,10 +46,6 @@ class AsyncInferenceResource(AsyncAPIResource): def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) - @cached_property - def models(self) -> AsyncModelsResource: - return AsyncModelsResource(self._client) - @cached_property def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: """ @@ -90,10 +74,6 @@ def __init__(self, inference: InferenceResource) -> None: def api_keys(self) -> APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._inference.api_keys) - @cached_property - def models(self) -> ModelsResourceWithRawResponse: - return ModelsResourceWithRawResponse(self._inference.models) - class AsyncInferenceResourceWithRawResponse: def __init__(self, inference: AsyncInferenceResource) -> None: @@ -103,10 +83,6 @@ def __init__(self, inference: AsyncInferenceResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._inference.api_keys) - @cached_property - def models(self) -> AsyncModelsResourceWithRawResponse: - return AsyncModelsResourceWithRawResponse(self._inference.models) - class InferenceResourceWithStreamingResponse: def __init__(self, inference: InferenceResource) -> None: @@ -116,10 +92,6 @@ def __init__(self, inference: InferenceResource) -> None: def api_keys(self) -> APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._inference.api_keys) - @cached_property - def models(self) -> ModelsResourceWithStreamingResponse: - return ModelsResourceWithStreamingResponse(self._inference.models) - class AsyncInferenceResourceWithStreamingResponse: def __init__(self, inference: AsyncInferenceResource) -> None: @@ -128,7 +100,3 @@ def __init__(self, inference: AsyncInferenceResource) -> None: @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: return 
AsyncAPIKeysResourceWithStreamingResponse(self._inference.api_keys) - - @cached_property - def models(self) -> AsyncModelsResourceWithStreamingResponse: - return AsyncModelsResourceWithStreamingResponse(self._inference.models) diff --git a/src/do_gradientai/resources/inference/models.py b/src/do_gradientai/resources/inference/models.py deleted file mode 100644 index 42e1dcb2..00000000 --- a/src/do_gradientai/resources/inference/models.py +++ /dev/null @@ -1,226 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.inference.model import Model -from ...types.inference.model_list_response import ModelListResponse - -__all__ = ["ModelsResource", "AsyncModelsResource"] - - -class ModelsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ModelsResourceWithStreamingResponse(self) - - def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - - def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. - """ - return self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelListResponse, - ) - - -class AsyncModelsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncModelsResourceWithStreamingResponse(self) - - async def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return await self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - - async def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. 
- """ - return await self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelListResponse, - ) - - -class ModelsResourceWithRawResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) - self.list = to_raw_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithRawResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) - self.list = async_to_raw_response_wrapper( - models.list, - ) - - -class ModelsResourceWithStreamingResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) - self.list = to_streamed_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithStreamingResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - models.list, - ) diff --git a/src/do_gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py index 829340d7..c3cbcd6d 100644 --- a/src/do_gradientai/types/inference/__init__.py +++ b/src/do_gradientai/types/inference/__init__.py @@ -2,9 +2,7 @@ from __future__ import annotations -from .model import Model as Model from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .model_list_response import ModelListResponse as ModelListResponse from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams diff --git a/src/do_gradientai/types/inference/model.py b/src/do_gradientai/types/inference/model.py deleted file mode 100644 index ed8843e8..00000000 --- a/src/do_gradientai/types/inference/model.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["Model"] - - -class Model(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/src/do_gradientai/types/inference/model_list_response.py b/src/do_gradientai/types/inference/model_list_response.py deleted file mode 100644 index 01bf3b62..00000000 --- a/src/do_gradientai/types/inference/model_list_response.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List -from typing_extensions import Literal - -from .model import Model -from ..._models import BaseModel - -__all__ = ["ModelListResponse"] - - -class ModelListResponse(BaseModel): - data: List[Model] - - object: Literal["list"] diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py deleted file mode 100644 index e930d83f..00000000 --- a/tests/api_resources/inference/test_models.py +++ /dev/null @@ -1,164 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.inference import Model, ModelListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestModels: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - model = client.inference.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.inference.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.inference.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - client.inference.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.inference.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.inference.models.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.inference.models.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncModels: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], 
indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - model = await async_client.inference.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.inference.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.inference.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - await async_client.inference.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.inference.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.inference.models.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.inference.models.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True From 7f0b64de2cbd5edca851e80b0eb69f553eb3ffd6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 03:21:55 +0000 Subject: [PATCH 067/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index cb95a5af..e9d82b51 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 886acf2e0eda98b9a718598587f7f81e +config_hash: e178baf496088c521dd245cbc46c932a From f7801badfec2829da0abe6b74df665ee6ef35d03 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 03:22:33 +0000 Subject: [PATCH 068/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 56 +- src/do_gradientai/_client.py | 41 +- src/do_gradientai/resources/__init__.py | 14 - .../resources/models/__init__.py | 19 + .../resources/{ => models}/models.py | 16 +- .../resources/providers/__init__.py | 47 -- .../resources/providers/anthropic/__init__.py | 33 - .../providers/anthropic/anthropic.py | 102 --- .../resources/providers/anthropic/keys.py | 686 ------------------ .../resources/providers/openai/__init__.py | 33 - .../resources/providers/openai/keys.py | 682 ----------------- .../resources/providers/openai/openai.py | 102 --- .../resources/providers/providers.py | 134 ---- .../types/{providers => models}/__init__.py | 0 .../types/providers/anthropic/__init__.py | 14 - .../providers/anthropic/key_create_params.py | 13 - .../anthropic/key_create_response.py | 12 - .../anthropic/key_delete_response.py | 12 - .../anthropic/key_list_agents_params.py | 15 - .../anthropic/key_list_agents_response.py | 22 - .../providers/anthropic/key_list_params.py | 15 - .../providers/anthropic/key_list_response.py | 18 - .../anthropic/key_retrieve_response.py | 12 - .../providers/anthropic/key_update_params.py | 17 - .../anthropic/key_update_response.py | 12 - .../types/providers/openai/__init__.py | 14 - .../providers/openai/key_create_params.py | 13 - .../providers/openai/key_create_response.py | 12 - .../providers/openai/key_delete_response.py | 12 - .../types/providers/openai/key_list_params.py | 15 - .../providers/openai/key_list_response.py | 18 - .../openai/key_retrieve_agents_params.py | 15 - .../openai/key_retrieve_agents_response.py | 22 - .../providers/openai/key_retrieve_response.py | 12 - .../providers/openai/key_update_params.py | 17 - .../providers/openai/key_update_response.py | 12 - .../{providers => models}/__init__.py | 0 .../providers/anthropic/__init__.py | 1 - .../providers/anthropic/test_keys.py | 557 -------------- .../providers/openai/__init__.py | 1 - .../providers/openai/test_keys.py | 557 -------------- 42 files changed, 32 insertions(+), 3377 deletions(-) create mode 100644 src/do_gradientai/resources/models/__init__.py rename src/do_gradientai/resources/{ => models}/models.py (95%) delete mode 100644 src/do_gradientai/resources/providers/__init__.py delete mode 100644 src/do_gradientai/resources/providers/anthropic/__init__.py delete mode 100644 src/do_gradientai/resources/providers/anthropic/anthropic.py delete mode 100644 src/do_gradientai/resources/providers/anthropic/keys.py delete mode 100644 src/do_gradientai/resources/providers/openai/__init__.py delete mode 100644 src/do_gradientai/resources/providers/openai/keys.py delete mode 100644 src/do_gradientai/resources/providers/openai/openai.py delete mode 100644 src/do_gradientai/resources/providers/providers.py rename src/do_gradientai/types/{providers => models}/__init__.py (100%) delete mode 100644 src/do_gradientai/types/providers/anthropic/__init__.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_create_params.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_create_response.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_delete_response.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_params.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_response.py delete mode 100644 
src/do_gradientai/types/providers/anthropic/key_list_params.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_response.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_retrieve_response.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_update_params.py delete mode 100644 src/do_gradientai/types/providers/anthropic/key_update_response.py delete mode 100644 src/do_gradientai/types/providers/openai/__init__.py delete mode 100644 src/do_gradientai/types/providers/openai/key_create_params.py delete mode 100644 src/do_gradientai/types/providers/openai/key_create_response.py delete mode 100644 src/do_gradientai/types/providers/openai/key_delete_response.py delete mode 100644 src/do_gradientai/types/providers/openai/key_list_params.py delete mode 100644 src/do_gradientai/types/providers/openai/key_list_response.py delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_response.py delete mode 100644 src/do_gradientai/types/providers/openai/key_update_params.py delete mode 100644 src/do_gradientai/types/providers/openai/key_update_response.py rename tests/api_resources/{providers => models}/__init__.py (100%) delete mode 100644 tests/api_resources/providers/anthropic/__init__.py delete mode 100644 tests/api_resources/providers/anthropic/test_keys.py delete mode 100644 tests/api_resources/providers/openai/__init__.py delete mode 100644 tests/api_resources/providers/openai/test_keys.py diff --git a/.stats.yml b/.stats.yml index e9d82b51..645d4148 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 67 +configured_endpoints: 55 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: e178baf496088c521dd245cbc46c932a +config_hash: ed552d382f42c2e579a4bb0a608e2055 diff --git a/api.md b/api.md index a7297098..9811559a 100644 --- a/api.md +++ b/api.md @@ -191,60 +191,6 @@ Methods: - client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse - client.agents.child_agents.view(uuid) -> ChildAgentViewResponse -# Providers - -## Anthropic - -### Keys - -Types: - -```python -from do_gradientai.types.providers.anthropic import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyListAgentsResponse, -) -``` - -Methods: - -- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse - -## OpenAI - -### Keys - -Types: - -```python -from do_gradientai.types.providers.openai import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyRetrieveAgentsResponse, -) -``` - -Methods: - -- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- 
client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse - # Regions Types: @@ -375,4 +321,4 @@ from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelLi Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 8710fe68..afd18a26 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -31,14 +31,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases - from .resources.models import ModelsResource, AsyncModelsResource + from .resources import chat, agents, models, regions, inference, indexing_jobs, knowledge_bases from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource - from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ @@ -115,12 +114,6 @@ def agents(self) -> AgentsResource: return AgentsResource(self) - @cached_property - def providers(self) -> ProvidersResource: - from .resources.providers import ProvidersResource - - return ProvidersResource(self) - @cached_property def regions(self) -> RegionsResource: from .resources.regions import RegionsResource @@ -334,12 +327,6 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) - @cached_property - def providers(self) -> AsyncProvidersResource: - from .resources.providers import AsyncProvidersResource - - return AsyncProvidersResource(self) - @cached_property def regions(self) -> AsyncRegionsResource: from .resources.regions import AsyncRegionsResource @@ -503,12 +490,6 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithRawResponse: - from .resources.providers import ProvidersResourceWithRawResponse - - return ProvidersResourceWithRawResponse(self._client.providers) - @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: from .resources.regions import RegionsResourceWithRawResponse @@ -558,12 +539,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: - from .resources.providers import AsyncProvidersResourceWithRawResponse - - return AsyncProvidersResourceWithRawResponse(self._client.providers) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: from .resources.regions import AsyncRegionsResourceWithRawResponse @@ 
-613,12 +588,6 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithStreamingResponse: - from .resources.providers import ProvidersResourceWithStreamingResponse - - return ProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: from .resources.regions import RegionsResourceWithStreamingResponse @@ -668,12 +637,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: - from .resources.providers import AsyncProvidersResourceWithStreamingResponse - - return AsyncProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: from .resources.regions import AsyncRegionsResourceWithStreamingResponse diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index 1763a13e..b074f7d1 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -40,14 +40,6 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) from .indexing_jobs import ( IndexingJobsResource, AsyncIndexingJobsResource, @@ -72,12 +64,6 @@ "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py new file mode 100644 index 00000000..7a5c25cc --- /dev/null +++ b/src/do_gradientai/resources/models/__init__.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
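The models resource moves from `resources/models.py` into a `models/` subpackage, but its public surface is unchanged. A minimal usage sketch, assuming the sync `GradientAI` client exported by `do_gradientai` (its async twin `AsyncGradientAI` appears in this series' tests) and placeholder credentials:

```python
# Sketch only: `GradientAI` and the `api_key` argument are assumed from the
# generated client. Only the resource's import path moved, not the call site.
from do_gradientai import GradientAI

client = GradientAI(api_key="...")  # placeholder credentials

models = client.models.list()  # still returns a ModelListResponse
print(models)
```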
+ +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) + +__all__ = [ + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/models.py b/src/do_gradientai/resources/models/models.py similarity index 95% rename from src/do_gradientai/resources/models.py rename to src/do_gradientai/resources/models/models.py index c8e78b9b..acdd45a1 100644 --- a/src/do_gradientai/resources/models.py +++ b/src/do_gradientai/resources/models/models.py @@ -7,19 +7,19 @@ import httpx -from ..types import model_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ...types import model_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.model_list_response import ModelListResponse +from ..._base_client import make_request_options +from ...types.model_list_response import ModelListResponse __all__ = ["ModelsResource", "AsyncModelsResource"] diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py deleted file mode 100644 index 1731e057..00000000 --- a/src/do_gradientai/resources/providers/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) - -__all__ = [ - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py deleted file mode 100644 index 057a3a2f..00000000 --- a/src/do_gradientai/resources/providers/anthropic/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py deleted file mode 100644 index 23a914e9..00000000 --- a/src/do_gradientai/resources/providers/anthropic/anthropic.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["AnthropicResource", "AsyncAnthropicResource"] - - -class AnthropicResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AnthropicResourceWithStreamingResponse(self) - - -class AsyncAnthropicResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
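The `with_raw_response` and `with_streaming_response` properties described in these docstrings are prefixes, not calls: each returns a view of the resource whose methods yield the HTTP response instead of the parsed model. A sketch against the surviving models resource, with client construction assumed as above (the header assertion mirrors the tests removed earlier in this series):

```python
from do_gradientai import GradientAI

client = GradientAI(api_key="...")  # placeholder credentials

# Raw mode reads the body eagerly but exposes the underlying HTTP response.
raw = client.models.with_raw_response.list()
assert raw.http_request.headers.get("X-Stainless-Lang") == "python"
models = raw.parse()  # parse into the typed ModelListResponse

# Streaming mode defers reading the body until it is consumed.
with client.models.with_streaming_response.list() as response:
    models = response.parse()
```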
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncAnthropicResourceWithStreamingResponse(self) - - -class AnthropicResourceWithRawResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithRawResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._anthropic.keys) - - -class AnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py deleted file mode 100644 index d1a33290..00000000 --- a/src/do_gradientai/resources/providers/anthropic/keys.py +++ /dev/null @@ -1,686 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params -from ....types.providers.anthropic.key_list_response import KeyListResponse -from ....types.providers.anthropic.key_create_response import KeyCreateResponse -from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse -from ....types.providers.anthropic.key_update_response import KeyUpdateResponse -from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/gen-ai/anthropic/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
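The `create()`, `retrieve()`, and `update()` methods above map one-to-one onto the documented HTTP calls. A sketch of the surface this patch deletes (after this commit the `providers` attribute no longer exists on the client); key values and UUIDs are placeholders:

```python
from do_gradientai import GradientAI

client = GradientAI(api_key="...")  # placeholder credentials

# POST /v2/gen-ai/anthropic/keys
created = client.providers.anthropic.keys.create(
    api_key="sk-ant-...",  # placeholder Anthropic secret
    name="my-anthropic-key",
)

# PUT /v2/gen-ai/anthropic/keys/{api_key_uuid}
client.providers.anthropic.keys.update(
    "00000000-0000-0000-0000-000000000000",  # placeholder path_api_key_uuid
    name="renamed-key",
)
```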
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/anthropic/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
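`list()` above (and `list_agents()` a little further down) takes `page`/`per_page`, which `maybe_transform` serializes as query parameters. A paginated-listing sketch under the same assumptions as the previous block:

```python
from do_gradientai import GradientAI

client = GradientAI(api_key="...")  # placeholder credentials

# Pagination is plain query parameters; omitted (NOT_GIVEN) values are
# dropped from the query string entirely.
keys = client.providers.anthropic.keys.list(page=1, per_page=20)
agents = client.providers.anthropic.keys.list_agents(
    "00000000-0000-0000-0000-000000000000",  # placeholder key UUID
    page=1,
    per_page=20,
)
```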
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/gen-ai/anthropic/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/anthropic/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
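Every method on `KeysResource` has a one-awaited-call twin on `AsyncKeysResource`, as the async definitions above show; the `AsyncGradientAI` client name comes from this series' tests. A sketch:

```python
import asyncio

from do_gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI(api_key="...")  # placeholder credentials
    keys = await client.providers.anthropic.keys.list(page=1, per_page=10)
    print(keys)


asyncio.run(main())
```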
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = to_raw_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_raw_response_wrapper( - keys.list_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = 
to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = to_streamed_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_streamed_response_wrapper( - keys.list_agents, - ) diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py deleted file mode 100644 index 66d8ca7a..00000000 --- a/src/do_gradientai/resources/providers/openai/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py deleted file mode 100644 index 01cfee75..00000000 --- a/src/do_gradientai/resources/providers/openai/keys.py +++ /dev/null @@ -1,682 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
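Every request in these files chooses a relative path when `self._client._base_url_overridden` is true and the absolute `https://api.digitalocean.com/...` URL otherwise, so redirecting the SDK at a mock or staging server is just a constructor argument. A sketch, assuming the generated client accepts `base_url` as Stainless clients generally do:

```python
from do_gradientai import GradientAI

# Assumption: the client accepts `base_url`. With it overridden, the
# ternaries above pick the relative path, so this request goes to
# http://127.0.0.1:4010/v2/gen-ai/openai/keys rather than api.digitalocean.com.
client = GradientAI(api_key="test", base_url="http://127.0.0.1:4010")
keys = client.providers.openai.keys.list()
```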
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params -from ....types.providers.openai.key_list_response import KeyListResponse -from ....types.providers.openai.key_create_response import KeyCreateResponse -from ....types.providers.openai.key_delete_response import KeyDeleteResponse -from ....types.providers.openai.key_update_response import KeyUpdateResponse -from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/gen-ai/openai/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/openai/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/openai/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. 
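The path-parameter guards above (`if not api_key_uuid: raise ValueError(...)`) reject empty identifiers client-side, before any HTTP request is made; the test suites this commit deletes asserted exactly this behavior. A sketch:

```python
import pytest

from do_gradientai import GradientAI

client = GradientAI(api_key="...")  # placeholder credentials

# No network traffic happens here: the empty identifier is rejected locally.
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid`"):
    client.providers.openai.keys.retrieve("")
```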
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/gen-ai/openai/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/openai/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/openai/keys" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_streamed_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_streamed_response_wrapper( - keys.retrieve_agents, - ) diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py deleted file mode 100644 index b02dc2e1..00000000 --- a/src/do_gradientai/resources/providers/openai/openai.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
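-# A minimal usage sketch for the keys resource defined above, assuming a
-# configured AsyncGradientAI client (auth/env wiring is an assumption here;
-# the resource path and method names match the code above):
-#
-#     import asyncio
-#     from do_gradientai import AsyncGradientAI
-#
-#     async def main() -> None:
-#         client = AsyncGradientAI()
-#         created = await client.providers.openai.keys.create(
-#             api_key="sk-example", name="staging-key"
-#         )
-#         keys = await client.providers.openai.keys.list(page=1, per_page=10)
-#         # `api_key_info.uuid` is a hypothetical field access for illustration:
-#         agents = await client.providers.openai.keys.retrieve_agents(
-#             uuid=created.api_key_info.uuid
-#         )
-#
-#     asyncio.run(main())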
- -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["OpenAIResource", "AsyncOpenAIResource"] - - -class OpenAIResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> OpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return OpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return OpenAIResourceWithStreamingResponse(self) - - -class AsyncOpenAIResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncOpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncOpenAIResourceWithStreamingResponse(self) - - -class OpenAIResourceWithRawResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithRawResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._openai.keys) - - -class OpenAIResourceWithStreamingResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithStreamingResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py deleted file mode 100644 index ef942f73..00000000 --- a/src/do_gradientai/resources/providers/providers.py +++ /dev/null @@ -1,134 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .openai.openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic.anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = ["ProvidersResource", "AsyncProvidersResource"] - - -class ProvidersResource(SyncAPIResource): - @cached_property - def anthropic(self) -> AnthropicResource: - return AnthropicResource(self._client) - - @cached_property - def openai(self) -> OpenAIResource: - return OpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> ProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ProvidersResourceWithStreamingResponse(self) - - -class AsyncProvidersResource(AsyncAPIResource): - @cached_property - def anthropic(self) -> AsyncAnthropicResource: - return AsyncAnthropicResource(self._client) - - @cached_property - def openai(self) -> AsyncOpenAIResource: - return AsyncOpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncProvidersResourceWithStreamingResponse(self) - - -class ProvidersResourceWithRawResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithRawResponse: - return AnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithRawResponse: - return OpenAIResourceWithRawResponse(self._providers.openai) - - -class AsyncProvidersResourceWithRawResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: - return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithRawResponse: - return AsyncOpenAIResourceWithRawResponse(self._providers.openai) - - -class ProvidersResourceWithStreamingResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithStreamingResponse: - return AnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithStreamingResponse: - return OpenAIResourceWithStreamingResponse(self._providers.openai) - - -class AsyncProvidersResourceWithStreamingResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: - return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: - return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/do_gradientai/types/providers/__init__.py b/src/do_gradientai/types/models/__init__.py similarity index 100% rename from src/do_gradientai/types/providers/__init__.py rename to src/do_gradientai/types/models/__init__.py diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py deleted file mode 100644 index eb47e709..00000000 --- a/src/do_gradientai/types/providers/anthropic/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated 
from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams -from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py deleted file mode 100644 index a032810c..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py deleted file mode 100644 index 2afe2dda..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py deleted file mode 100644 index ebbc3b7e..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
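-# The request-param models in this package are `total=False` TypedDicts, so
-# every field is optional at the call site and omitted keys are simply never
-# sent; the `maybe_transform` step in the resource code above drops unset
-# values before serialization. A minimal sketch:
-#
-#     from do_gradientai.types.providers.anthropic import KeyCreateParams
-#
-#     params: KeyCreateParams = {"name": "ci-key"}  # api_key legitimately absent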
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListAgentsParams"] - - -class KeyListAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py deleted file mode 100644 index ba6ca946..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyListAgentsResponse"] - - -class KeyListAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py deleted file mode 100644 index d0b84e96..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py deleted file mode 100644 index b8361fc2..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py deleted file mode 100644 index b04277a6..00000000 --- a/src/do_gradientai/types/providers/anthropic/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py deleted file mode 100644 index 70abf332..00000000 --- a/src/do_gradientai/types/providers/openai/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams -from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/do_gradientai/types/providers/openai/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py deleted file mode 100644 index f3b4d36c..00000000 --- a/src/do_gradientai/types/providers/openai/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
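-# `KeyUpdateParams` above shows the collision-avoidance pattern: the path
-# parameter and the body field share the wire name `api_key_uuid`, so the
-# body field gets the distinct Python name `body_api_key_uuid` plus a
-# `PropertyInfo(alias=...)`. A sketch of the intended effect (the exact
-# serialized output is an assumption):
-#
-#     params = {"body_api_key_uuid": "abc-123", "name": "renamed"}
-#     # after maybe_transform(params, KeyUpdateParams):
-#     #     {"api_key_uuid": "abc-123", "name": "renamed"}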
- -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py deleted file mode 100644 index 0c8922bb..00000000 --- a/src/do_gradientai/types/providers/openai/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/do_gradientai/types/providers/openai/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py deleted file mode 100644 index c263cba3..00000000 --- a/src/do_gradientai/types/providers/openai/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py deleted file mode 100644 index ec745d14..00000000 --- a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyRetrieveAgentsParams"] - - -class KeyRetrieveAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py deleted file mode 100644 index f42edea6..00000000 --- a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
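-# List responses carry `meta`/`links` rather than an auto-paginating
-# iterator, so exhausting a listing is a manual loop. A sketch that treats a
-# short page as the end of the collection (APIMeta's fields are not shown
-# here, so this deliberately avoids relying on them):
-#
-#     page, per_page = 1, 100
-#     while True:
-#         resp = client.providers.openai.keys.list(page=page, per_page=per_page)
-#         infos = resp.api_key_infos or []
-#         for info in infos:
-#             print(info)
-#         if len(infos) < per_page:
-#             break
-#         page += 1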
- -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyRetrieveAgentsResponse"] - - -class KeyRetrieveAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py deleted file mode 100644 index 7015b6f7..00000000 --- a/src/do_gradientai/types/providers/openai/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/do_gradientai/types/providers/openai/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py deleted file mode 100644 index 4889f994..00000000 --- a/src/do_gradientai/types/providers/openai/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/models/__init__.py similarity index 100% rename from tests/api_resources/providers/__init__.py rename to tests/api_resources/models/__init__.py diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/providers/anthropic/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py deleted file mode 100644 index 7aa595f7..00000000 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ /dev/null @@ -1,557 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
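-# The response model deleted just above types `agents` with the string
-# annotation "APIAgent" and imports the real class at the bottom of the
-# module. That defers resolution until the class body exists, breaking the
-# circular dependency between the agent model and the provider-key models;
-# Pydantic resolves the forward reference when the model is first built
-# (whether this SDK also triggers an explicit `model_rebuild()` is an
-# assumption).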
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.providers.anthropic import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyListAgentsResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - 
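-    # Each endpoint gets the same trio above: a plain call, a
-    # `.with_raw_response` variant, and a `.with_streaming_response` variant.
-    # Outside the test suite the raw variant reads the same way; a sketch
-    # assuming a configured sync client:
-    #
-    #     response = client.providers.anthropic.keys.with_raw_response.list()
-    #     assert response.is_closed
-    #     key_list = response.parse()  # same KeyListResponse as the plain call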
@pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> 
None: - with client.providers.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_agents(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list_agents( - uuid="uuid", - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_agents(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_agents(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.list_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list_agents(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="", - ) - - -class TestAsyncKeys: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, 
response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list_agents( - uuid="uuid", - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="", - ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/providers/openai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py deleted file mode 100644 index 714dc4bd..00000000 --- a/tests/api_resources/providers/openai/test_keys.py +++ /dev/null @@ -1,557 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
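-# The anthropic async suite above parametrizes its client fixture across
-# "loose"/"strict" response validation and an aiohttp transport
-# ({"http_client": "aiohttp"}); the fixture itself presumably lives in the
-# suite's conftest.py, which this patch does not touch.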
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.providers.openai import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyRetrieveAgentsResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - key = client.providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - key = client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - key = client.providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - key = client.providers.openai.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with 
client.providers.openai.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_agents(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve_agents( - uuid="uuid", - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_agents(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="", - ) - - -class TestAsyncKeys: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - 
- @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="", - ) From 24083ea8db7da01d74d8d6e18952511f2ae58a05 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 03:22:54 +0000 Subject: [PATCH 069/200] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 56 +- src/do_gradientai/_client.py | 41 +- src/do_gradientai/resources/__init__.py | 14 + .../resources/{models => }/models.py | 16 +- .../resources/models/__init__.py | 19 - .../resources/providers/__init__.py | 47 ++ .../resources/providers/anthropic/__init__.py | 33 + .../providers/anthropic/anthropic.py | 102 +++ .../resources/providers/anthropic/keys.py | 686 ++++++++++++++++++ .../resources/providers/openai/__init__.py | 33 + .../resources/providers/openai/keys.py | 682 +++++++++++++++++ .../resources/providers/openai/openai.py | 102 +++ .../resources/providers/providers.py | 134 ++++ .../types/{models => providers}/__init__.py | 0 .../types/providers/anthropic/__init__.py | 14 + .../providers/anthropic/key_create_params.py | 13 + .../anthropic/key_create_response.py | 12 + .../anthropic/key_delete_response.py | 12 + .../anthropic/key_list_agents_params.py | 15 + .../anthropic/key_list_agents_response.py | 22 + .../providers/anthropic/key_list_params.py | 15 + .../providers/anthropic/key_list_response.py | 18 + .../anthropic/key_retrieve_response.py | 12 + .../providers/anthropic/key_update_params.py | 17 + .../anthropic/key_update_response.py | 12 + .../types/providers/openai/__init__.py | 14 + .../providers/openai/key_create_params.py | 13 + .../providers/openai/key_create_response.py | 12 + .../providers/openai/key_delete_response.py | 12 + .../types/providers/openai/key_list_params.py | 15 + .../providers/openai/key_list_response.py | 18 + .../openai/key_retrieve_agents_params.py | 15 + .../openai/key_retrieve_agents_response.py | 22 + .../providers/openai/key_retrieve_response.py | 12 + .../providers/openai/key_update_params.py | 17 + 
.../providers/openai/key_update_response.py | 12 + .../{models => providers}/__init__.py | 0 .../providers/anthropic/__init__.py | 1 + .../providers/anthropic/test_keys.py | 557 ++++++++++++++ .../providers/openai/__init__.py | 1 + .../providers/openai/test_keys.py | 557 ++++++++++++++ 42 files changed, 3377 insertions(+), 32 deletions(-) rename src/do_gradientai/resources/{models => }/models.py (95%) delete mode 100644 src/do_gradientai/resources/models/__init__.py create mode 100644 src/do_gradientai/resources/providers/__init__.py create mode 100644 src/do_gradientai/resources/providers/anthropic/__init__.py create mode 100644 src/do_gradientai/resources/providers/anthropic/anthropic.py create mode 100644 src/do_gradientai/resources/providers/anthropic/keys.py create mode 100644 src/do_gradientai/resources/providers/openai/__init__.py create mode 100644 src/do_gradientai/resources/providers/openai/keys.py create mode 100644 src/do_gradientai/resources/providers/openai/openai.py create mode 100644 src/do_gradientai/resources/providers/providers.py rename src/do_gradientai/types/{models => providers}/__init__.py (100%) create mode 100644 src/do_gradientai/types/providers/anthropic/__init__.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_create_params.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_create_response.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_delete_response.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_params.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_response.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_params.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_response.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_retrieve_response.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_update_params.py create mode 100644 src/do_gradientai/types/providers/anthropic/key_update_response.py create mode 100644 src/do_gradientai/types/providers/openai/__init__.py create mode 100644 src/do_gradientai/types/providers/openai/key_create_params.py create mode 100644 src/do_gradientai/types/providers/openai/key_create_response.py create mode 100644 src/do_gradientai/types/providers/openai/key_delete_response.py create mode 100644 src/do_gradientai/types/providers/openai/key_list_params.py create mode 100644 src/do_gradientai/types/providers/openai/key_list_response.py create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_response.py create mode 100644 src/do_gradientai/types/providers/openai/key_update_params.py create mode 100644 src/do_gradientai/types/providers/openai/key_update_response.py rename tests/api_resources/{models => providers}/__init__.py (100%) create mode 100644 tests/api_resources/providers/anthropic/__init__.py create mode 100644 tests/api_resources/providers/anthropic/test_keys.py create mode 100644 tests/api_resources/providers/openai/__init__.py create mode 100644 tests/api_resources/providers/openai/test_keys.py diff --git a/.stats.yml b/.stats.yml index 645d4148..e9d82b51 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 55 +configured_endpoints: 67 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: ed552d382f42c2e579a4bb0a608e2055 +config_hash: e178baf496088c521dd245cbc46c932a diff --git a/api.md b/api.md index 9811559a..a7297098 100644 --- a/api.md +++ b/api.md @@ -191,6 +191,60 @@ Methods: - client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse - client.agents.child_agents.view(uuid) -> ChildAgentViewResponse +# Providers + +## Anthropic + +### Keys + +Types: + +```python +from do_gradientai.types.providers.anthropic import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + +## OpenAI + +### Keys + +Types: + +```python +from do_gradientai.types.providers.openai import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyRetrieveAgentsResponse, +) +``` + +Methods: + +- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse + # Regions Types: @@ -321,4 +375,4 @@ from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelLi Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index afd18a26..8710fe68 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -31,13 +31,14 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases + from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource + from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ @@ -114,6 +115,12 @@ def agents(self) -> AgentsResource: return 
AgentsResource(self) + @cached_property + def providers(self) -> ProvidersResource: + from .resources.providers import ProvidersResource + + return ProvidersResource(self) + @cached_property def regions(self) -> RegionsResource: from .resources.regions import RegionsResource @@ -327,6 +334,12 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) + @cached_property + def providers(self) -> AsyncProvidersResource: + from .resources.providers import AsyncProvidersResource + + return AsyncProvidersResource(self) + @cached_property def regions(self) -> AsyncRegionsResource: from .resources.regions import AsyncRegionsResource @@ -490,6 +503,12 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithRawResponse: + from .resources.providers import ProvidersResourceWithRawResponse + + return ProvidersResourceWithRawResponse(self._client.providers) + @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: from .resources.regions import RegionsResourceWithRawResponse @@ -539,6 +558,12 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: + from .resources.providers import AsyncProvidersResourceWithRawResponse + + return AsyncProvidersResourceWithRawResponse(self._client.providers) + @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: from .resources.regions import AsyncRegionsResourceWithRawResponse @@ -588,6 +613,12 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithStreamingResponse: + from .resources.providers import ProvidersResourceWithStreamingResponse + + return ProvidersResourceWithStreamingResponse(self._client.providers) + @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: from .resources.regions import RegionsResourceWithStreamingResponse @@ -637,6 +668,12 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: + from .resources.providers import AsyncProvidersResourceWithStreamingResponse + + return AsyncProvidersResourceWithStreamingResponse(self._client.providers) + @cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: from .resources.regions import AsyncRegionsResourceWithStreamingResponse diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index b074f7d1..1763a13e 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -40,6 +40,14 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) from .indexing_jobs import ( IndexingJobsResource, AsyncIndexingJobsResource, @@ -64,6 +72,12 @@ "AsyncAgentsResourceWithRawResponse", 
"AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models.py similarity index 95% rename from src/do_gradientai/resources/models/models.py rename to src/do_gradientai/resources/models.py index acdd45a1..c8e78b9b 100644 --- a/src/do_gradientai/resources/models/models.py +++ b/src/do_gradientai/resources/models.py @@ -7,19 +7,19 @@ import httpx -from ...types import model_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import model_list_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.model_list_response import ModelListResponse +from .._base_client import make_request_options +from ..types.model_list_response import ModelListResponse __all__ = ["ModelsResource", "AsyncModelsResource"] diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py deleted file mode 100644 index 7a5c25cc..00000000 --- a/src/do_gradientai/resources/models/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) - -__all__ = [ - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", -] diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py new file mode 100644 index 00000000..1731e057 --- /dev/null +++ b/src/do_gradientai/resources/providers/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) + +__all__ = [ + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py new file mode 100644 index 00000000..057a3a2f --- /dev/null +++ b/src/do_gradientai/resources/providers/anthropic/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py new file mode 100644 index 00000000..23a914e9 --- /dev/null +++ b/src/do_gradientai/resources/providers/anthropic/anthropic.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
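+#
+# Orientation sketch (illustrative, not generated): `AnthropicResource` below
+# only nests the `keys` sub-resource, so calls take the form
+#
+#     client.providers.anthropic.keys.list(page=1, per_page=10)  # placeholder paging values
+#
+# and the `.with_streaming_response` variants defer reading the body until it
+# is parsed.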
+ +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AnthropicResource", "AsyncAnthropicResource"] + + +class AnthropicResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AnthropicResourceWithStreamingResponse(self) + + +class AsyncAnthropicResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAnthropicResourceWithStreamingResponse(self) + + +class AnthropicResourceWithRawResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithRawResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._anthropic.keys) + + +class AnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py new file mode 100644 index 00000000..d1a33290 --- /dev/null +++ b/src/do_gradientai/resources/providers/anthropic/keys.py @@ -0,0 +1,686 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params +from ....types.providers.anthropic.key_list_response import KeyListResponse +from ....types.providers.anthropic.key_create_response import KeyCreateResponse +from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse +from ....types.providers.anthropic.key_update_response import KeyUpdateResponse +from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse +from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
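+
+        Example (an illustrative sketch; both values below are placeholder
+        strings, mirroring the generated tests):
+
+            key = client.providers.anthropic.keys.update(
+                path_api_key_uuid="api_key_uuid",
+                name="name",
+            )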
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
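+
+        Example (an illustrative sketch; the UUID is a placeholder):
+
+            confirmation = client.providers.anthropic.keys.delete(
+                "api_key_uuid",
+            )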
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
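+        # Illustrative note (not generated): `timeout=30.0` (seconds) or an
+        # `httpx.Timeout` instance overrides the client-level default for this
+        # call only.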
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
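+
+        Example (an illustrative async sketch; values are placeholders, and
+        `async_client` is an AsyncGradientAI instance as in the generated
+        tests):
+
+            key = await async_client.providers.anthropic.keys.update(
+                path_api_key_uuid="api_key_uuid",
+                name="name",
+            )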
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
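+
+        Example (an illustrative async sketch; the UUID is a placeholder):
+
+            confirmation = await async_client.providers.anthropic.keys.delete(
+                "api_key_uuid",
+            )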
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = to_raw_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_raw_response_wrapper( + keys.list_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = 
to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = to_streamed_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_streamed_response_wrapper( + keys.list_agents, + ) diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py new file mode 100644 index 00000000..66d8ca7a --- /dev/null +++ b/src/do_gradientai/resources/providers/openai/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py new file mode 100644 index 00000000..01cfee75 --- /dev/null +++ b/src/do_gradientai/resources/providers/openai/keys.py @@ -0,0 +1,682 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
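+#
+# Usage sketch (illustrative, not generated; both argument values are
+# placeholder strings): given a configured client,
+#
+#     key = client.providers.openai.keys.create(api_key="api_key", name="name")
+#
+# issues the POST to /v2/gen-ai/openai/keys and returns a KeyCreateResponse.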
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params +from ....types.providers.openai.key_list_response import KeyListResponse +from ....types.providers.openai.key_create_response import KeyCreateResponse +from ....types.providers.openai.key_delete_response import KeyDeleteResponse +from ....types.providers.openai.key_update_response import KeyUpdateResponse +from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse +from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_streamed_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_streamed_response_wrapper( + keys.retrieve_agents, + ) diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py new file mode 100644 index 00000000..b02dc2e1 --- /dev/null +++ b/src/do_gradientai/resources/providers/openai/openai.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
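+
+# Editorial note: `OpenAIResource` below is a pure namespace resource; it owns
+# no endpoints itself and only exposes the nested `keys` resource, reached as
+# `client.providers.openai.keys`. The raw/streaming wrapper classes mirror
+# that nesting, so e.g. `client.providers.openai.keys.with_raw_response.list()`
+# returns the raw HTTP response rather than a parsed `KeyListResponse`.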
+ +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] + + +class OpenAIResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> OpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return OpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return OpenAIResourceWithStreamingResponse(self) + + +class AsyncOpenAIResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncOpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncOpenAIResourceWithStreamingResponse(self) + + +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._openai.keys) + + +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py new file mode 100644 index 00000000..ef942f73 --- /dev/null +++ b/src/do_gradientai/resources/providers/providers.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .openai.openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic.anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = ["ProvidersResource", "AsyncProvidersResource"] + + +class ProvidersResource(SyncAPIResource): + @cached_property + def anthropic(self) -> AnthropicResource: + return AnthropicResource(self._client) + + @cached_property + def openai(self) -> OpenAIResource: + return OpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> ProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ProvidersResourceWithStreamingResponse(self) + + +class AsyncProvidersResource(AsyncAPIResource): + @cached_property + def anthropic(self) -> AsyncAnthropicResource: + return AsyncAnthropicResource(self._client) + + @cached_property + def openai(self) -> AsyncOpenAIResource: + return AsyncOpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncProvidersResourceWithStreamingResponse(self) + + +class ProvidersResourceWithRawResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithRawResponse: + return AnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithRawResponse: + return OpenAIResourceWithRawResponse(self._providers.openai) + + +class AsyncProvidersResourceWithRawResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: + return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithRawResponse: + return AsyncOpenAIResourceWithRawResponse(self._providers.openai) + + +class ProvidersResourceWithStreamingResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithStreamingResponse: + return AnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithStreamingResponse: + return OpenAIResourceWithStreamingResponse(self._providers.openai) + + +class AsyncProvidersResourceWithStreamingResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: + return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: + return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/do_gradientai/types/models/__init__.py b/src/do_gradientai/types/providers/__init__.py similarity index 100% rename from src/do_gradientai/types/models/__init__.py rename to src/do_gradientai/types/providers/__init__.py diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py new file mode 100644 index 00000000..eb47e709 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/__init__.py @@ -0,0 +1,14 @@ +# File generated from 
our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams +from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py new file mode 100644 index 00000000..a032810c --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py new file mode 100644 index 00000000..2afe2dda --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py new file mode 100644 index 00000000..ebbc3b7e --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
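+
+# Editorial note: `total=False` makes both fields below optional. The resource
+# method passes the dict through `maybe_transform(..., KeyListAgentsParams)`
+# and serializes only the keys that were actually provided into the query
+# string, e.g. `?page=1&per_page=10`.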
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListAgentsParams"] + + +class KeyListAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py new file mode 100644 index 00000000..ba6ca946 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyListAgentsResponse"] + + +class KeyListAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py new file mode 100644 index 00000000..d0b84e96 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py new file mode 100644 index 00000000..b8361fc2 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py new file mode 100644 index 00000000..c07d7f66 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
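+
+# Editorial note: `body_api_key_uuid` carries `PropertyInfo(alias="api_key_uuid")`,
+# so it is serialized as `api_key_uuid` in the JSON body. The separate
+# `path_api_key_uuid` argument on `keys.update()` fills the `{api_key_uuid}`
+# placeholder in the URL path; the two Python names differ only to avoid a
+# collision between the path and body parameters.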
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py new file mode 100644 index 00000000..b04277a6 --- /dev/null +++ b/src/do_gradientai/types/providers/anthropic/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py new file mode 100644 index 00000000..70abf332 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/__init__.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams +from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py new file mode 100644 index 00000000..f3b4d36c --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
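+
+# Editorial note: like the other response models in this package, this is a
+# pydantic model whose single field is optional, so callers should guard for
+# `None`. An illustrative sketch:
+#
+#     resp = client.providers.openai.keys.create(name="example")
+#     if resp.api_key_info is not None:
+#         print(resp.api_key_info)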
+ +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py new file mode 100644 index 00000000..0c8922bb --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py new file mode 100644 index 00000000..c263cba3 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py new file mode 100644 index 00000000..ec745d14 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyRetrieveAgentsParams"] + + +class KeyRetrieveAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py new file mode 100644 index 00000000..f42edea6 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
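+
+# Editorial note: `APIAgent` is referenced as a string annotation and imported
+# only at the bottom of this module, deferring the import to break what is
+# presumably a circular dependency with `api_agent`. `from __future__ import
+# annotations` keeps the forward reference valid for static type checkers,
+# while the pydantic model resolves it at runtime.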
+ +from __future__ import annotations + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyRetrieveAgentsResponse"] + + +class KeyRetrieveAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py new file mode 100644 index 00000000..7015b6f7 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py new file mode 100644 index 00000000..c07d7f66 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py new file mode 100644 index 00000000..4889f994 --- /dev/null +++ b/src/do_gradientai/types/providers/openai/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/tests/api_resources/models/__init__.py b/tests/api_resources/providers/__init__.py similarity index 100% rename from tests/api_resources/models/__init__.py rename to tests/api_resources/providers/__init__.py diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/anthropic/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py new file mode 100644 index 00000000..7aa595f7 --- /dev/null +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -0,0 +1,557 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
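+
+# Editorial note: these tests run against TEST_API_BASE_URL (defaulting to
+# http://127.0.0.1:4010, conventionally a local mock of the API spec). Each
+# endpoint is exercised four ways (plain call, `_with_all_params`,
+# `with_raw_response`, and `with_streaming_response`), parametrized over
+# "loose" and "strict" client response-validation modes.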
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.providers.anthropic import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> 
None: + with client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + 
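+    # Editorial sketch (not part of the generated suite): the endpoints these
+    # tests exercise map onto ordinary async client usage. A minimal,
+    # hypothetical example — it assumes credentials are already configured in
+    # whatever environment variables the client reads, and the `api_key` value
+    # is a placeholder; parameter names are taken from the tests above:
+    #
+    #     import asyncio
+    #     from do_gradientai import AsyncGradientAI
+    #
+    #     async def main() -> None:
+    #         client = AsyncGradientAI()
+    #         created = await client.providers.anthropic.keys.create(
+    #             api_key="sk-ant-...",  # the Anthropic key being registered
+    #             name="ci-key",
+    #         )
+    #         page = await client.providers.anthropic.keys.list(page=1, per_page=10)
+    #
+    #     asyncio.run(main())
+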
@pytest.mark.skip() + @parametrize + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/openai/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py new file mode 100644 index 00000000..714dc4bd --- /dev/null +++ b/tests/api_resources/providers/openai/test_keys.py @@ -0,0 +1,557 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.providers.openai import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyRetrieveAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + 
@parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with 
client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + 
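+    # Editorial sketch (illustration only, names as used in this module): each
+    # endpoint above is asserted through the three access patterns the
+    # generated client exposes — plain call, raw response, and streaming
+    # response. Roughly:
+    #
+    #     key = await async_client.providers.openai.keys.retrieve("api_key_uuid")
+    #     # -> parsed KeyRetrieveResponse
+    #
+    #     raw = await async_client.providers.openai.keys.with_raw_response.retrieve(
+    #         "api_key_uuid"
+    #     )
+    #     key = await raw.parse()  # raw also carries http_request / is_closed
+    #
+    #     async with async_client.providers.openai.keys.with_streaming_response.retrieve(
+    #         "api_key_uuid"
+    #     ) as streamed:
+    #         key = await streamed.parse()  # body is not consumed until parse()
+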
+ @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) From 0a83ac32294e2c73c68fd8fa792aed1b52b8af33 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 05:30:27 +0000 Subject: [PATCH 070/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 46 +++++----- src/do_gradientai/_client.py | 39 +-------- src/do_gradientai/resources/__init__.py | 14 --- .../resources/knowledge_bases/__init__.py | 14 +++ .../{ => knowledge_bases}/indexing_jobs.py | 28 +++--- .../knowledge_bases/knowledge_bases.py | 32 +++++++ src/do_gradientai/types/__init__.py | 11 --- src/do_gradientai/types/api_knowledge_base.py | 2 +- .../types/knowledge_bases/__init__.py | 11 +++ .../{ => knowledge_bases}/api_indexing_job.py | 2 +- .../api_knowledge_base_data_source.py | 2 +- .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 6 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 +- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../test_indexing_jobs.py | 86 +++++++++---------- 21 files changed, 152 insertions(+), 153 deletions(-) rename src/do_gradientai/resources/{ => knowledge_bases}/indexing_jobs.py (95%) rename src/do_gradientai/types/{ => knowledge_bases}/api_indexing_job.py (96%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_create_params.py (100%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_create_response.py (89%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_list_params.py (100%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_list_response.py (77%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_data_sources_response.py (97%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_response.py (89%) rename 
src/do_gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_params.py (91%) rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_response.py (90%) rename tests/api_resources/{ => knowledge_bases}/test_indexing_jobs.py (80%) diff --git a/.stats.yml b/.stats.yml index e9d82b51..3dd4e641 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: e178baf496088c521dd245cbc46c932a +config_hash: 2529d2f80a3d70107331426b594b7f9b diff --git a/api.md b/api.md index a7297098..b67c57a4 100644 --- a/api.md +++ b/api.md @@ -257,29 +257,6 @@ Methods: - client.regions.list(\*\*params) -> RegionListResponse -# IndexingJobs - -Types: - -```python -from do_gradientai.types import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # KnowledgeBases Types: @@ -326,6 +303,29 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +## IndexingJobs + +Types: + +```python +from do_gradientai.types.knowledge_bases import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # Chat ## Completions diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 8710fe68..b8d55962 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -31,12 +31,11 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from 
.resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -127,12 +126,6 @@ def regions(self) -> RegionsResource: return RegionsResource(self) - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - from .resources.indexing_jobs import IndexingJobsResource - - return IndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource @@ -346,12 +339,6 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - from .resources.indexing_jobs import AsyncIndexingJobsResource - - return AsyncIndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import AsyncKnowledgeBasesResource @@ -515,12 +502,6 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse - - return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse @@ -570,12 +551,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse - - return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse @@ -625,12 +600,6 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse - - return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse @@ -680,12 +649,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse - - return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index 
1763a13e..6ad0aa32 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -48,14 +48,6 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -84,12 +76,6 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py index 03d143e2..80d04328 100644 --- a/src/do_gradientai/resources/knowledge_bases/__init__.py +++ b/src/do_gradientai/resources/knowledge_bases/__init__.py @@ -8,6 +8,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -24,6 +32,12 @@ "AsyncDataSourcesResourceWithRawResponse", "DataSourcesResourceWithStreamingResponse", "AsyncDataSourcesResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/do_gradientai/resources/indexing_jobs.py b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 95% rename from src/do_gradientai/resources/indexing_jobs.py rename to src/do_gradientai/resources/knowledge_bases/indexing_jobs.py index 71c59023..39151e41 100644 --- a/src/do_gradientai/resources/indexing_jobs.py +++ b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py @@ -6,23 +6,27 @@ import httpx -from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from 
..types.indexing_job_list_response import IndexingJobListResponse -from ..types.indexing_job_create_response import IndexingJobCreateResponse -from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( + indexing_job_list_params, + indexing_job_create_params, + indexing_job_update_cancel_params, +) +from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse +from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse +from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py index 2cab4f7b..28acdd7f 100644 --- a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py @@ -25,6 +25,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from ...types.knowledge_base_list_response import KnowledgeBaseListResponse from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse @@ -40,6 +48,10 @@ class KnowledgeBasesResource(SyncAPIResource): def data_sources(self) -> DataSourcesResource: return DataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + return IndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: """ @@ -316,6 +328,10 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource): def data_sources(self) -> AsyncDataSourcesResource: return AsyncDataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + return AsyncIndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: """ @@ -611,6 +627,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithRawResponse: return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse: + return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -636,6 +656,10 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: return 
AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse: + return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class KnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -661,6 +685,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithStreamingResponse: return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: + return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -685,3 +713,7 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index e3c2ab9c..dde7f848 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -7,7 +7,6 @@ from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel -from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams @@ -25,25 +24,15 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from 
.knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py index 5b4b6e2c..2b0676f0 100644 --- a/src/do_gradientai/types/api_knowledge_base.py +++ b/src/do_gradientai/types/api_knowledge_base.py @@ -4,7 +4,7 @@ from datetime import datetime from .._models import BaseModel -from .api_indexing_job import APIIndexingJob +from .knowledge_bases.api_indexing_job import APIIndexingJob __all__ = ["APIKnowledgeBase"] diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py index 859c3618..9fc915e5 100644 --- a/src/do_gradientai/types/knowledge_bases/__init__.py +++ b/src/do_gradientai/types/knowledge_bases/__init__.py @@ -2,16 +2,27 @@ from __future__ import annotations +from .api_indexing_job import APIIndexingJob as APIIndexingJob from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams from .data_source_list_response import DataSourceListResponse as DataSourceListResponse +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from 
.indexing_job_retrieve_data_sources_response import ( + IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) diff --git a/src/do_gradientai/types/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py similarity index 96% rename from src/do_gradientai/types/api_indexing_job.py rename to src/do_gradientai/types/knowledge_bases/api_indexing_job.py index f24aac94..2809141c 100644 --- a/src/do_gradientai/types/api_indexing_job.py +++ b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIIndexingJob"] diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index 57080aaa..ca24d6f0 100644 --- a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -4,7 +4,7 @@ from datetime import datetime from ..._models import BaseModel -from ..api_indexing_job import APIIndexingJob +from .api_indexing_job import APIIndexingJob from .api_spaces_data_source import APISpacesDataSource from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource diff --git a/src/do_gradientai/types/indexing_job_create_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/do_gradientai/types/indexing_job_create_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/do_gradientai/types/indexing_job_create_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 89% rename from src/do_gradientai/types/indexing_job_create_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py index 839bc83b..835ec60d 100644 --- a/src/do_gradientai/types/indexing_job_create_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobCreateResponse"] diff --git a/src/do_gradientai/types/indexing_job_list_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/do_gradientai/types/indexing_job_list_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/do_gradientai/types/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 77% rename from src/do_gradientai/types/indexing_job_list_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py index 1379cc55..4784c1a1 100644 --- a/src/do_gradientai/types/indexing_job_list_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -2,10 +2,10 @@ from typing import List, Optional -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from ..._models import BaseModel +from ..agents.api_meta import APIMeta from .api_indexing_job import APIIndexingJob +from ..agents.api_links import APILinks __all__ = 
["IndexingJobListResponse"] diff --git a/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 97% rename from src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py index b178b984..a9d0c2c0 100644 --- a/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] diff --git a/src/do_gradientai/types/indexing_job_retrieve_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 89% rename from src/do_gradientai/types/indexing_job_retrieve_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 95f33d7a..6034bdf1 100644 --- a/src/do_gradientai/types/indexing_job_retrieve_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobRetrieveResponse"] diff --git a/src/do_gradientai/types/indexing_job_update_cancel_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 91% rename from src/do_gradientai/types/indexing_job_update_cancel_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py index 4c2848b0..9359a42a 100644 --- a/src/do_gradientai/types/indexing_job_update_cancel_params.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from .._utils import PropertyInfo +from ..._utils import PropertyInfo __all__ = ["IndexingJobUpdateCancelParams"] diff --git a/src/do_gradientai/types/indexing_job_update_cancel_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 90% rename from src/do_gradientai/types/indexing_job_update_cancel_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index d50e1865..ae4b394f 100644 --- a/src/do_gradientai/types/indexing_job_update_cancel_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobUpdateCancelResponse"] diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py similarity index 80% rename from tests/api_resources/test_indexing_jobs.py rename to tests/api_resources/knowledge_bases/test_indexing_jobs.py index 41ba0f8c..206339e0 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from do_gradientai.types.knowledge_bases import ( 
IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -26,13 +26,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create() + indexing_job = client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create( + indexing_job = client.knowledge_bases.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.create() + response = client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.create() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve( + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list() + indexing_job = client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list( + indexing_job = client.knowledge_bases.indexing_jobs.list( 
page=0, per_page=0, ) @@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.list() + response = client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.list() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve_data_sources(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve_data_sources( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve_data_sources( + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N @parametrize def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve_data_sources( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: - response = 
client.indexing_jobs.with_raw_response.update_cancel( + response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.update_cancel( + with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_path_params_update_cancel(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.indexing_jobs.with_raw_response.update_cancel( + client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) @@ -241,13 +241,13 @@ class TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create() + indexing_job = await async_client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create( + indexing_job = await async_client.knowledge_bases.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.create() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.create() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -298,7 +298,7 @@ async def 
test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list() + indexing_job = await async_client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list( + indexing_job = await async_client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, ) @@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.list() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.list() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve_data_sources( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize async def 
test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @parametrize async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.update_cancel( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.update_cancel( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien @parametrize async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.update_cancel( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) From 984e5ed03195d712cf751f594d703fe16c4f1e46 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 06:10:32 +0000 Subject: [PATCH 071/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 48 ++--- src/do_gradientai/_client.py | 40 ++-- src/do_gradientai/resources/__init__.py | 28 +-- .../resources/agents/__init__.py | 28 +-- src/do_gradientai/resources/agents/agents.py | 40 ++-- .../agents/{child_agents.py => routes.py} | 128 ++++++------ .../__init__.py | 26 +-- 
.../anthropic/__init__.py | 0 .../anthropic/anthropic.py | 0 .../anthropic/keys.py | 19 +- .../model_providers.py} | 62 +++--- .../openai/__init__.py | 0 .../openai/keys.py | 19 +- .../openai/openai.py | 0 src/do_gradientai/types/agents/__init__.py | 12 +- ...gent_add_params.py => route_add_params.py} | 4 +- ..._add_response.py => route_add_response.py} | 4 +- ...e_response.py => route_delete_response.py} | 4 +- ...pdate_params.py => route_update_params.py} | 4 +- ...e_response.py => route_update_response.py} | 4 +- ...iew_response.py => route_view_response.py} | 4 +- .../__init__.py | 0 .../anthropic/__init__.py | 0 .../anthropic/key_create_params.py | 0 .../anthropic/key_create_response.py | 0 .../anthropic/key_delete_response.py | 0 .../anthropic/key_list_agents_params.py | 0 .../anthropic/key_list_agents_response.py | 0 .../anthropic/key_list_params.py | 0 .../anthropic/key_list_response.py | 0 .../anthropic/key_retrieve_response.py | 0 .../anthropic/key_update_params.py | 0 .../anthropic/key_update_response.py | 0 .../openai/__init__.py | 0 .../openai/key_create_params.py | 0 .../openai/key_create_response.py | 0 .../openai/key_delete_response.py | 0 .../openai/key_list_params.py | 0 .../openai/key_list_response.py | 0 .../openai/key_retrieve_agents_params.py | 0 .../openai/key_retrieve_agents_response.py | 0 .../openai/key_retrieve_response.py | 0 .../openai/key_update_params.py | 0 .../openai/key_update_response.py | 0 .../{test_child_agents.py => test_routes.py} | 184 +++++++++--------- .../__init__.py | 0 .../anthropic/__init__.py | 0 .../anthropic/test_keys.py | 106 +++++----- .../openai/__init__.py | 0 .../openai/test_keys.py | 106 +++++----- 51 files changed, 441 insertions(+), 431 deletions(-) rename src/do_gradientai/resources/agents/{child_agents.py => routes.py} (86%) rename src/do_gradientai/resources/{providers => model_providers}/__init__.py (65%) rename src/do_gradientai/resources/{providers => model_providers}/anthropic/__init__.py (100%) rename src/do_gradientai/resources/{providers => model_providers}/anthropic/anthropic.py (100%) rename src/do_gradientai/resources/{providers => model_providers}/anthropic/keys.py (97%) rename src/do_gradientai/resources/{providers/providers.py => model_providers/model_providers.py} (61%) rename src/do_gradientai/resources/{providers => model_providers}/openai/__init__.py (100%) rename src/do_gradientai/resources/{providers => model_providers}/openai/keys.py (97%) rename src/do_gradientai/resources/{providers => model_providers}/openai/openai.py (100%) rename src/do_gradientai/types/agents/{child_agent_add_params.py => route_add_params.py} (87%) rename src/do_gradientai/types/agents/{child_agent_add_response.py => route_add_response.py} (79%) rename src/do_gradientai/types/agents/{child_agent_delete_response.py => route_delete_response.py} (74%) rename src/do_gradientai/types/agents/{child_agent_update_params.py => route_update_params.py} (86%) rename src/do_gradientai/types/agents/{child_agent_update_response.py => route_update_response.py} (81%) rename src/do_gradientai/types/agents/{child_agent_view_response.py => route_view_response.py} (78%) rename src/do_gradientai/types/{providers => model_providers}/__init__.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/__init__.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_create_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_create_response.py (100%) rename 
src/do_gradientai/types/{providers => model_providers}/anthropic/key_delete_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_agents_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_agents_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_retrieve_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_update_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_update_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/__init__.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_create_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_create_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_delete_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_list_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_list_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_agents_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_agents_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_response.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_update_params.py (100%) rename src/do_gradientai/types/{providers => model_providers}/openai/key_update_response.py (100%) rename tests/api_resources/agents/{test_child_agents.py => test_routes.py} (69%) rename tests/api_resources/{providers => model_providers}/__init__.py (100%) rename tests/api_resources/{providers => model_providers}/anthropic/__init__.py (100%) rename tests/api_resources/{providers => model_providers}/anthropic/test_keys.py (80%) rename tests/api_resources/{providers => model_providers}/openai/__init__.py (100%) rename tests/api_resources/{providers => model_providers}/openai/test_keys.py (81%) diff --git a/.stats.yml b/.stats.yml index 3dd4e641..d58c3c34 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 2529d2f80a3d70107331426b594b7f9b +config_hash: a5bfbbd032355b26ddd41d659c93495b diff --git a/api.md b/api.md index b67c57a4..686761f9 100644 --- a/api.md +++ b/api.md @@ -171,27 +171,27 @@ Methods: - client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput - client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse -## ChildAgents +## Routes Types: ```python from do_gradientai.types.agents import ( - ChildAgentUpdateResponse, - ChildAgentDeleteResponse, - ChildAgentAddResponse, - ChildAgentViewResponse, + RouteUpdateResponse, + RouteDeleteResponse, + RouteAddResponse, + RouteViewResponse, ) ``` Methods: -- client.agents.child_agents.update(path_child_agent_uuid, \*, 
path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse +- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse +- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse +- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse +- client.agents.routes.view(uuid) -> RouteViewResponse -# Providers +# ModelProviders ## Anthropic @@ -200,7 +200,7 @@ Methods: Types: ```python -from do_gradientai.types.providers.anthropic import ( +from do_gradientai.types.model_providers.anthropic import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -212,12 +212,12 @@ from do_gradientai.types.providers.anthropic import ( Methods: -- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse +- client.model_providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.model_providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.model_providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.model_providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.model_providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.model_providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse ## OpenAI @@ -226,7 +226,7 @@ Methods: Types: ```python -from do_gradientai.types.providers.openai import ( +from do_gradientai.types.model_providers.openai import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -238,12 +238,12 @@ from do_gradientai.types.providers.openai import ( Methods: -- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse +- client.model_providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.model_providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.model_providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.model_providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.model_providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.model_providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse # Regions diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index b8d55962..a57125ee 100644 --- a/src/do_gradientai/_client.py +++ 
b/src/do_gradientai/_client.py @@ -31,14 +31,14 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, knowledge_bases + from .resources import chat, agents, models, regions, inference, knowledge_bases, model_providers from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource - from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource + from .resources.model_providers.model_providers import ModelProvidersResource, AsyncModelProvidersResource __all__ = [ "Timeout", @@ -115,10 +115,10 @@ def agents(self) -> AgentsResource: return AgentsResource(self) @cached_property - def providers(self) -> ProvidersResource: - from .resources.providers import ProvidersResource + def model_providers(self) -> ModelProvidersResource: + from .resources.model_providers import ModelProvidersResource - return ProvidersResource(self) + return ModelProvidersResource(self) @cached_property def regions(self) -> RegionsResource: @@ -328,10 +328,10 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) @cached_property - def providers(self) -> AsyncProvidersResource: - from .resources.providers import AsyncProvidersResource + def model_providers(self) -> AsyncModelProvidersResource: + from .resources.model_providers import AsyncModelProvidersResource - return AsyncProvidersResource(self) + return AsyncModelProvidersResource(self) @cached_property def regions(self) -> AsyncRegionsResource: @@ -491,10 +491,10 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) @cached_property - def providers(self) -> providers.ProvidersResourceWithRawResponse: - from .resources.providers import ProvidersResourceWithRawResponse + def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse: + from .resources.model_providers import ModelProvidersResourceWithRawResponse - return ProvidersResourceWithRawResponse(self._client.providers) + return ModelProvidersResourceWithRawResponse(self._client.model_providers) @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: @@ -540,10 +540,10 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: - from .resources.providers import AsyncProvidersResourceWithRawResponse + def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse: + from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse - return AsyncProvidersResourceWithRawResponse(self._client.providers) + return AsyncModelProvidersResourceWithRawResponse(self._client.model_providers) @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: @@ -589,10 +589,10 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) @cached_property - def providers(self) -> providers.ProvidersResourceWithStreamingResponse: - from .resources.providers 
import ProvidersResourceWithStreamingResponse + def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse: + from .resources.model_providers import ModelProvidersResourceWithStreamingResponse - return ProvidersResourceWithStreamingResponse(self._client.providers) + return ModelProvidersResourceWithStreamingResponse(self._client.model_providers) @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: @@ -638,10 +638,10 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: - from .resources.providers import AsyncProvidersResourceWithStreamingResponse + def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse: + from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse - return AsyncProvidersResourceWithStreamingResponse(self._client.providers) + return AsyncModelProvidersResourceWithStreamingResponse(self._client.model_providers) @cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index 6ad0aa32..785bf1ac 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -40,14 +40,6 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -56,6 +48,14 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) +from .model_providers import ( + ModelProvidersResource, + AsyncModelProvidersResource, + ModelProvidersResourceWithRawResponse, + AsyncModelProvidersResourceWithRawResponse, + ModelProvidersResourceWithStreamingResponse, + AsyncModelProvidersResourceWithStreamingResponse, +) __all__ = [ "AgentsResource", @@ -64,12 +64,12 @@ "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", + "ModelProvidersResource", + "AsyncModelProvidersResource", + "ModelProvidersResourceWithRawResponse", + "AsyncModelProvidersResourceWithRawResponse", + "ModelProvidersResourceWithStreamingResponse", + "AsyncModelProvidersResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py index 3eb9cde8..f5423f00 100644 --- a/src/do_gradientai/resources/agents/__init__.py +++ b/src/do_gradientai/resources/agents/__init__.py @@ -8,6 +8,14 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .routes import ( + RoutesResource, + AsyncRoutesResource, + RoutesResourceWithRawResponse, + AsyncRoutesResourceWithRawResponse, + RoutesResourceWithStreamingResponse, + 
AsyncRoutesResourceWithStreamingResponse, +) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -32,14 +40,6 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) from .evaluation_runs import ( EvaluationRunsResource, AsyncEvaluationRunsResource, @@ -130,12 +130,12 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", - "ChildAgentsResource", - "AsyncChildAgentsResource", - "ChildAgentsResourceWithRawResponse", - "AsyncChildAgentsResourceWithRawResponse", - "ChildAgentsResourceWithStreamingResponse", - "AsyncChildAgentsResourceWithStreamingResponse", + "RoutesResource", + "AsyncRoutesResource", + "RoutesResourceWithRawResponse", + "AsyncRoutesResourceWithRawResponse", + "RoutesResourceWithStreamingResponse", + "AsyncRoutesResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py index 6bb39894..5762139d 100644 --- a/src/do_gradientai/resources/agents/agents.py +++ b/src/do_gradientai/resources/agents/agents.py @@ -6,6 +6,14 @@ import httpx +from .routes import ( + RoutesResource, + AsyncRoutesResource, + RoutesResourceWithRawResponse, + AsyncRoutesResourceWithRawResponse, + RoutesResourceWithStreamingResponse, + AsyncRoutesResourceWithStreamingResponse, +) from ...types import ( APIRetrievalMethod, APIDeploymentVisibility, @@ -48,14 +56,6 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) from ..._base_client import make_request_options from .evaluation_runs import ( EvaluationRunsResource, @@ -143,8 +143,8 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self._client) @cached_property - def child_agents(self) -> ChildAgentsResource: - return ChildAgentsResource(self._client) + def routes(self) -> RoutesResource: + return RoutesResource(self._client) @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: @@ -527,8 +527,8 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self._client) @cached_property - def child_agents(self) -> AsyncChildAgentsResource: - return AsyncChildAgentsResource(self._client) + def routes(self) -> AsyncRoutesResource: + return AsyncRoutesResource(self._client) @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: @@ -933,8 +933,8 @@ def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property - def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return ChildAgentsResourceWithRawResponse(self._agents.child_agents) + def routes(self) -> RoutesResourceWithRawResponse: + return RoutesResourceWithRawResponse(self._agents.routes) class AsyncAgentsResourceWithRawResponse: @@ -993,8 +993,8 @@ def 
knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) + def routes(self) -> AsyncRoutesResourceWithRawResponse: + return AsyncRoutesResourceWithRawResponse(self._agents.routes) class AgentsResourceWithStreamingResponse: @@ -1053,8 +1053,8 @@ def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property - def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: - return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + def routes(self) -> RoutesResourceWithStreamingResponse: + return RoutesResourceWithStreamingResponse(self._agents.routes) class AsyncAgentsResourceWithStreamingResponse: @@ -1113,5 +1113,5 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + def routes(self) -> AsyncRoutesResourceWithStreamingResponse: + return AsyncRoutesResourceWithStreamingResponse(self._agents.routes) diff --git a/src/do_gradientai/resources/agents/child_agents.py b/src/do_gradientai/resources/agents/routes.py similarity index 86% rename from src/do_gradientai/resources/agents/child_agents.py rename to src/do_gradientai/resources/agents/routes.py index ad30f106..ed25d795 100644 --- a/src/do_gradientai/resources/agents/child_agents.py +++ b/src/do_gradientai/resources/agents/routes.py @@ -15,34 +15,34 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import child_agent_add_params, child_agent_update_params -from ...types.agents.child_agent_add_response import ChildAgentAddResponse -from ...types.agents.child_agent_view_response import ChildAgentViewResponse -from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse +from ...types.agents import route_add_params, route_update_params +from ...types.agents.route_add_response import RouteAddResponse +from ...types.agents.route_view_response import RouteViewResponse +from ...types.agents.route_delete_response import RouteDeleteResponse +from ...types.agents.route_update_response import RouteUpdateResponse -__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] +__all__ = ["RoutesResource", "AsyncRoutesResource"] -class ChildAgentsResource(SyncAPIResource): +class RoutesResource(SyncAPIResource): @cached_property - def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: + def with_raw_response(self) -> RoutesResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return ChildAgentsResourceWithRawResponse(self) + return RoutesResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: + def with_streaming_response(self) -> RoutesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return ChildAgentsResourceWithStreamingResponse(self) + return RoutesResourceWithStreamingResponse(self) def update( self, path_child_agent_uuid: str, *, path_parent_agent_uuid: str, body_child_agent_uuid: str | NotGiven = NOT_GIVEN, body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, if_case: str | NotGiven = NOT_GIVEN, route_name: str | NotGiven = NOT_GIVEN, uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: + ) -> RouteUpdateResponse: """ To update an agent route for an agent, send a PUT request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. @@ -96,12 +96,12 @@ def update( "route_name": route_name, "uuid": uuid, }, - child_agent_update_params.ChildAgentUpdateParams, + route_update_params.RouteUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentUpdateResponse, + cast_to=RouteUpdateResponse, ) def delete( self, child_agent_uuid: str, *, parent_agent_uuid: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: + ) -> RouteDeleteResponse: """ To delete an agent route from a parent agent, send a DELETE request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. @@ -140,7 +140,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentDeleteResponse, + cast_to=RouteDeleteResponse, ) def add( self, path_child_agent_uuid: str, *, path_parent_agent_uuid: str, body_child_agent_uuid: str | NotGiven = NOT_GIVEN, body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, if_case: str | NotGiven = NOT_GIVEN, route_name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: + ) -> RouteAddResponse: """ To add an agent route to an agent, send a POST request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. @@ -193,12 +193,12 @@ def add( "body_parent_agent_uuid": body_parent_agent_uuid, "route_name": route_name, }, - child_agent_add_params.ChildAgentAddParams, + route_add_params.RouteAddParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentAddResponse, + cast_to=RouteAddResponse, ) def view( self, uuid: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: + ) -> RouteViewResponse: """ To view agent routes for an agent, send a GET request to `/v2/gen-ai/agents/{uuid}/child_agents`.
@@ -234,29 +234,29 @@ def view( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentViewResponse, + cast_to=RouteViewResponse, ) -class AsyncChildAgentsResource(AsyncAPIResource): +class AsyncRoutesResource(AsyncAPIResource): @cached_property - def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: + def with_raw_response(self) -> AsyncRoutesResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return AsyncChildAgentsResourceWithRawResponse(self) + return AsyncRoutesResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncRoutesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return AsyncChildAgentsResourceWithStreamingResponse(self) + return AsyncRoutesResourceWithStreamingResponse(self) async def update( self, @@ -274,7 +274,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: + ) -> RouteUpdateResponse: """ To update an agent route for an agent, send a PUT request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. @@ -310,12 +310,12 @@ async def update( "route_name": route_name, "uuid": uuid, }, - child_agent_update_params.ChildAgentUpdateParams, + route_update_params.RouteUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentUpdateResponse, + cast_to=RouteUpdateResponse, ) async def delete( @@ -329,7 +329,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: + ) -> RouteDeleteResponse: """ To delete an agent route from a parent agent, send a DELETE request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. @@ -354,7 +354,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentDeleteResponse, + cast_to=RouteDeleteResponse, ) async def add( @@ -372,7 +372,7 @@ async def add( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: + ) -> RouteAddResponse: """ To add an agent route to an agent, send a POST request to `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. 
@@ -407,12 +407,12 @@ async def add( "body_parent_agent_uuid": body_parent_agent_uuid, "route_name": route_name, }, - child_agent_add_params.ChildAgentAddParams, + route_add_params.RouteAddParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentAddResponse, + cast_to=RouteAddResponse, ) async def view( self, uuid: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: + ) -> RouteViewResponse: """ To view agent routes for an agent, send a GET request to `/v2/gen-ai/agents/{uuid}/child_agents`. @@ -448,77 +448,77 @@ async def view( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ChildAgentViewResponse, + cast_to=RouteViewResponse, ) -class ChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents +class RoutesResourceWithRawResponse: + def __init__(self, routes: RoutesResource) -> None: + self._routes = routes self.update = to_raw_response_wrapper( - child_agents.update, + routes.update, ) self.delete = to_raw_response_wrapper( - child_agents.delete, + routes.delete, ) self.add = to_raw_response_wrapper( - child_agents.add, + routes.add, ) self.view = to_raw_response_wrapper( - child_agents.view, + routes.view, ) -class AsyncChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents +class AsyncRoutesResourceWithRawResponse: + def __init__(self, routes: AsyncRoutesResource) -> None: + self._routes = routes self.update = async_to_raw_response_wrapper( - child_agents.update, + routes.update, ) self.delete = async_to_raw_response_wrapper( - child_agents.delete, + routes.delete, ) self.add = async_to_raw_response_wrapper( - child_agents.add, + routes.add, ) self.view = async_to_raw_response_wrapper( - child_agents.view, + routes.view, ) -class ChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents +class RoutesResourceWithStreamingResponse: + def __init__(self, routes: RoutesResource) -> None: + self._routes = routes self.update = to_streamed_response_wrapper( - child_agents.update, + routes.update, ) self.delete = to_streamed_response_wrapper( - child_agents.delete, + routes.delete, ) self.add = to_streamed_response_wrapper( - child_agents.add, + routes.add, ) self.view = to_streamed_response_wrapper( - child_agents.view, + routes.view, ) -class AsyncChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents +class AsyncRoutesResourceWithStreamingResponse: + def __init__(self, routes: AsyncRoutesResource) -> None: + self._routes = routes self.update = async_to_streamed_response_wrapper( - child_agents.update, + routes.update, ) self.delete = async_to_streamed_response_wrapper( - child_agents.delete, + routes.delete, ) self.add = async_to_streamed_response_wrapper( - child_agents.add, + routes.add, ) self.view = async_to_streamed_response_wrapper( - child_agents.view, + routes.view, ) diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/model_providers/__init__.py similarity index 65% rename from
src/do_gradientai/resources/providers/__init__.py rename to src/do_gradientai/resources/model_providers/__init__.py index 1731e057..3d91a86c 100644 --- a/src/do_gradientai/resources/providers/__init__.py +++ b/src/do_gradientai/resources/model_providers/__init__.py @@ -16,13 +16,13 @@ AnthropicResourceWithStreamingResponse, AsyncAnthropicResourceWithStreamingResponse, ) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, +from .model_providers import ( + ModelProvidersResource, + AsyncModelProvidersResource, + ModelProvidersResourceWithRawResponse, + AsyncModelProvidersResourceWithRawResponse, + ModelProvidersResourceWithStreamingResponse, + AsyncModelProvidersResourceWithStreamingResponse, ) __all__ = [ @@ -38,10 +38,10 @@ "AsyncOpenAIResourceWithRawResponse", "OpenAIResourceWithStreamingResponse", "AsyncOpenAIResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", + "ModelProvidersResource", + "AsyncModelProvidersResource", + "ModelProvidersResourceWithRawResponse", + "AsyncModelProvidersResourceWithRawResponse", + "ModelProvidersResourceWithStreamingResponse", + "AsyncModelProvidersResourceWithStreamingResponse", ] diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/model_providers/anthropic/__init__.py similarity index 100% rename from src/do_gradientai/resources/providers/anthropic/__init__.py rename to src/do_gradientai/resources/model_providers/anthropic/__init__.py diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/model_providers/anthropic/anthropic.py similarity index 100% rename from src/do_gradientai/resources/providers/anthropic/anthropic.py rename to src/do_gradientai/resources/model_providers/anthropic/anthropic.py diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/model_providers/anthropic/keys.py similarity index 97% rename from src/do_gradientai/resources/providers/anthropic/keys.py rename to src/do_gradientai/resources/model_providers/anthropic/keys.py index d1a33290..4d884655 100644 --- a/src/do_gradientai/resources/providers/anthropic/keys.py +++ b/src/do_gradientai/resources/model_providers/anthropic/keys.py @@ -15,13 +15,18 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options -from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params -from ....types.providers.anthropic.key_list_response import KeyListResponse -from ....types.providers.anthropic.key_create_response import KeyCreateResponse -from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse -from ....types.providers.anthropic.key_update_response import KeyUpdateResponse -from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse +from ....types.model_providers.anthropic import ( + key_list_params, + key_create_params, + key_update_params, + key_list_agents_params, +) +from ....types.model_providers.anthropic.key_list_response import KeyListResponse 
+from ....types.model_providers.anthropic.key_create_response import KeyCreateResponse +from ....types.model_providers.anthropic.key_delete_response import KeyDeleteResponse +from ....types.model_providers.anthropic.key_update_response import KeyUpdateResponse +from ....types.model_providers.anthropic.key_retrieve_response import KeyRetrieveResponse +from ....types.model_providers.anthropic.key_list_agents_response import KeyListAgentsResponse __all__ = ["KeysResource", "AsyncKeysResource"] diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/model_providers/model_providers.py similarity index 61% rename from src/do_gradientai/resources/providers/providers.py rename to src/do_gradientai/resources/model_providers/model_providers.py index ef942f73..cf710ecf 100644 --- a/src/do_gradientai/resources/providers/providers.py +++ b/src/do_gradientai/resources/model_providers/model_providers.py @@ -21,10 +21,10 @@ AsyncAnthropicResourceWithStreamingResponse, ) -__all__ = ["ProvidersResource", "AsyncProvidersResource"] +__all__ = ["ModelProvidersResource", "AsyncModelProvidersResource"] -class ProvidersResource(SyncAPIResource): +class ModelProvidersResource(SyncAPIResource): @cached_property def anthropic(self) -> AnthropicResource: return AnthropicResource(self._client) @@ -34,26 +34,26 @@ def openai(self) -> OpenAIResource: return OpenAIResource(self._client) @cached_property - def with_raw_response(self) -> ProvidersResourceWithRawResponse: + def with_raw_response(self) -> ModelProvidersResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return ProvidersResourceWithRawResponse(self) + return ModelProvidersResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: + def with_streaming_response(self) -> ModelProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return ProvidersResourceWithStreamingResponse(self) + return ModelProvidersResourceWithStreamingResponse(self) -class AsyncProvidersResource(AsyncAPIResource): +class AsyncModelProvidersResource(AsyncAPIResource): @cached_property def anthropic(self) -> AsyncAnthropicResource: return AsyncAnthropicResource(self._client) @@ -63,72 +63,72 @@ def openai(self) -> AsyncOpenAIResource: return AsyncOpenAIResource(self._client) @cached_property - def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: + def with_raw_response(self) -> AsyncModelProvidersResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return AsyncProvidersResourceWithRawResponse(self) + return AsyncModelProvidersResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncModelProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return AsyncProvidersResourceWithStreamingResponse(self) + return AsyncModelProvidersResourceWithStreamingResponse(self) -class ProvidersResourceWithRawResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers +class ModelProvidersResourceWithRawResponse: + def __init__(self, model_providers: ModelProvidersResource) -> None: + self._model_providers = model_providers @cached_property def anthropic(self) -> AnthropicResourceWithRawResponse: - return AnthropicResourceWithRawResponse(self._providers.anthropic) + return AnthropicResourceWithRawResponse(self._model_providers.anthropic) @cached_property def openai(self) -> OpenAIResourceWithRawResponse: - return OpenAIResourceWithRawResponse(self._providers.openai) + return OpenAIResourceWithRawResponse(self._model_providers.openai) -class AsyncProvidersResourceWithRawResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers +class AsyncModelProvidersResourceWithRawResponse: + def __init__(self, model_providers: AsyncModelProvidersResource) -> None: + self._model_providers = model_providers @cached_property def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: - return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) + return AsyncAnthropicResourceWithRawResponse(self._model_providers.anthropic) @cached_property def openai(self) -> AsyncOpenAIResourceWithRawResponse: - return AsyncOpenAIResourceWithRawResponse(self._providers.openai) + return AsyncOpenAIResourceWithRawResponse(self._model_providers.openai) -class ProvidersResourceWithStreamingResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers +class ModelProvidersResourceWithStreamingResponse: + def __init__(self, model_providers: ModelProvidersResource) -> None: + self._model_providers = model_providers @cached_property def anthropic(self) -> AnthropicResourceWithStreamingResponse: - return AnthropicResourceWithStreamingResponse(self._providers.anthropic) + return AnthropicResourceWithStreamingResponse(self._model_providers.anthropic) @cached_property def openai(self) -> OpenAIResourceWithStreamingResponse: - return OpenAIResourceWithStreamingResponse(self._providers.openai) + return OpenAIResourceWithStreamingResponse(self._model_providers.openai) -class AsyncProvidersResourceWithStreamingResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers +class AsyncModelProvidersResourceWithStreamingResponse: + def __init__(self, model_providers: AsyncModelProvidersResource) -> None: + self._model_providers = model_providers @cached_property def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: - return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) + return AsyncAnthropicResourceWithStreamingResponse(self._model_providers.anthropic) @cached_property def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: - return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) + return AsyncOpenAIResourceWithStreamingResponse(self._model_providers.openai) diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/model_providers/openai/__init__.py similarity index 100% rename from src/do_gradientai/resources/providers/openai/__init__.py rename to 
src/do_gradientai/resources/model_providers/openai/__init__.py diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/model_providers/openai/keys.py similarity index 97% rename from src/do_gradientai/resources/providers/openai/keys.py rename to src/do_gradientai/resources/model_providers/openai/keys.py index 01cfee75..fb974808 100644 --- a/src/do_gradientai/resources/providers/openai/keys.py +++ b/src/do_gradientai/resources/model_providers/openai/keys.py @@ -15,13 +15,18 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options -from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params -from ....types.providers.openai.key_list_response import KeyListResponse -from ....types.providers.openai.key_create_response import KeyCreateResponse -from ....types.providers.openai.key_delete_response import KeyDeleteResponse -from ....types.providers.openai.key_update_response import KeyUpdateResponse -from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse +from ....types.model_providers.openai import ( + key_list_params, + key_create_params, + key_update_params, + key_retrieve_agents_params, +) +from ....types.model_providers.openai.key_list_response import KeyListResponse +from ....types.model_providers.openai.key_create_response import KeyCreateResponse +from ....types.model_providers.openai.key_delete_response import KeyDeleteResponse +from ....types.model_providers.openai.key_update_response import KeyUpdateResponse +from ....types.model_providers.openai.key_retrieve_response import KeyRetrieveResponse +from ....types.model_providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse __all__ = ["KeysResource", "AsyncKeysResource"] diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/model_providers/openai/openai.py similarity index 100% rename from src/do_gradientai/resources/providers/openai/openai.py rename to src/do_gradientai/resources/model_providers/openai/openai.py diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py index 7e100741..1dd18511 100644 --- a/src/do_gradientai/types/agents/__init__.py +++ b/src/do_gradientai/types/agents/__init__.py @@ -5,8 +5,12 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks from .api_star_metric import APIStarMetric as APIStarMetric +from .route_add_params import RouteAddParams as RouteAddParams from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun +from .route_add_response import RouteAddResponse as RouteAddResponse from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .route_update_params import RouteUpdateParams as RouteUpdateParams +from .route_view_response import RouteViewResponse as RouteViewResponse from .version_list_params import VersionListParams as VersionListParams from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric from .api_evaluation_prompt import APIEvaluationPrompt as APIEvaluationPrompt @@ -14,9 +18,10 @@ from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam +from .route_delete_response import 
RouteDeleteResponse as RouteDeleteResponse +from .route_update_response import RouteUpdateResponse as RouteUpdateResponse from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams -from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams from .function_create_params import FunctionCreateParams as FunctionCreateParams from .function_update_params import FunctionUpdateParams as FunctionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse @@ -24,15 +29,10 @@ from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase -from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse -from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams -from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse -from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse -from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput diff --git a/src/do_gradientai/types/agents/child_agent_add_params.py b/src/do_gradientai/types/agents/route_add_params.py similarity index 87% rename from src/do_gradientai/types/agents/child_agent_add_params.py rename to src/do_gradientai/types/agents/route_add_params.py index 001baa6f..b4fcb417 100644 --- a/src/do_gradientai/types/agents/child_agent_add_params.py +++ b/src/do_gradientai/types/agents/route_add_params.py @@ -6,10 +6,10 @@ from ..._utils import PropertyInfo -__all__ = ["ChildAgentAddParams"] +__all__ = ["RouteAddParams"] -class ChildAgentAddParams(TypedDict, total=False): +class RouteAddParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] diff --git a/src/do_gradientai/types/agents/child_agent_add_response.py b/src/do_gradientai/types/agents/route_add_response.py similarity index 79% rename from src/do_gradientai/types/agents/child_agent_add_response.py rename to src/do_gradientai/types/agents/route_add_response.py index baccec10..cd3bb16a 100644 --- a/src/do_gradientai/types/agents/child_agent_add_response.py +++ b/src/do_gradientai/types/agents/route_add_response.py @@ -4,10 +4,10 @@ from ..._models import BaseModel -__all__ = ["ChildAgentAddResponse"] +__all__ = ["RouteAddResponse"] -class ChildAgentAddResponse(BaseModel): +class RouteAddResponse(BaseModel): child_agent_uuid: Optional[str] = None parent_agent_uuid: Optional[str] = None diff --git 
a/src/do_gradientai/types/agents/child_agent_delete_response.py b/src/do_gradientai/types/agents/route_delete_response.py similarity index 74% rename from src/do_gradientai/types/agents/child_agent_delete_response.py rename to src/do_gradientai/types/agents/route_delete_response.py index b50fb024..07105a62 100644 --- a/src/do_gradientai/types/agents/child_agent_delete_response.py +++ b/src/do_gradientai/types/agents/route_delete_response.py @@ -4,10 +4,10 @@ from ..._models import BaseModel -__all__ = ["ChildAgentDeleteResponse"] +__all__ = ["RouteDeleteResponse"] -class ChildAgentDeleteResponse(BaseModel): +class RouteDeleteResponse(BaseModel): child_agent_uuid: Optional[str] = None parent_agent_uuid: Optional[str] = None diff --git a/src/do_gradientai/types/agents/child_agent_update_params.py b/src/do_gradientai/types/agents/route_update_params.py similarity index 86% rename from src/do_gradientai/types/agents/child_agent_update_params.py rename to src/do_gradientai/types/agents/route_update_params.py index 2f009a52..cb6d6391 100644 --- a/src/do_gradientai/types/agents/child_agent_update_params.py +++ b/src/do_gradientai/types/agents/route_update_params.py @@ -6,10 +6,10 @@ from ..._utils import PropertyInfo -__all__ = ["ChildAgentUpdateParams"] +__all__ = ["RouteUpdateParams"] -class ChildAgentUpdateParams(TypedDict, total=False): +class RouteUpdateParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] diff --git a/src/do_gradientai/types/agents/child_agent_update_response.py b/src/do_gradientai/types/agents/route_update_response.py similarity index 81% rename from src/do_gradientai/types/agents/child_agent_update_response.py rename to src/do_gradientai/types/agents/route_update_response.py index 48a13c72..75e1eda5 100644 --- a/src/do_gradientai/types/agents/child_agent_update_response.py +++ b/src/do_gradientai/types/agents/route_update_response.py @@ -4,10 +4,10 @@ from ..._models import BaseModel -__all__ = ["ChildAgentUpdateResponse"] +__all__ = ["RouteUpdateResponse"] -class ChildAgentUpdateResponse(BaseModel): +class RouteUpdateResponse(BaseModel): child_agent_uuid: Optional[str] = None parent_agent_uuid: Optional[str] = None diff --git a/src/do_gradientai/types/agents/child_agent_view_response.py b/src/do_gradientai/types/agents/route_view_response.py similarity index 78% rename from src/do_gradientai/types/agents/child_agent_view_response.py rename to src/do_gradientai/types/agents/route_view_response.py index ffbaef12..dd9af70b 100644 --- a/src/do_gradientai/types/agents/child_agent_view_response.py +++ b/src/do_gradientai/types/agents/route_view_response.py @@ -6,10 +6,10 @@ from ..._models import BaseModel -__all__ = ["ChildAgentViewResponse"] +__all__ = ["RouteViewResponse"] -class ChildAgentViewResponse(BaseModel): +class RouteViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None diff --git a/src/do_gradientai/types/providers/__init__.py b/src/do_gradientai/types/model_providers/__init__.py similarity index 100% rename from src/do_gradientai/types/providers/__init__.py rename to src/do_gradientai/types/model_providers/__init__.py diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/model_providers/anthropic/__init__.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/__init__.py rename to 
src/do_gradientai/types/model_providers/anthropic/__init__.py diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/model_providers/anthropic/key_create_params.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_create_params.py rename to src/do_gradientai/types/model_providers/anthropic/key_create_params.py diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/model_providers/anthropic/key_create_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_create_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_create_response.py diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/model_providers/anthropic/key_delete_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_delete_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_delete_response.py diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_list_agents_params.py rename to src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_list_agents_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/model_providers/anthropic/key_list_params.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_list_params.py rename to src/do_gradientai/types/model_providers/anthropic/key_list_params.py diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_list_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_list_response.py diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_retrieve_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/model_providers/anthropic/key_update_params.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_update_params.py rename to src/do_gradientai/types/model_providers/anthropic/key_update_params.py diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/model_providers/anthropic/key_update_response.py similarity index 100% rename from src/do_gradientai/types/providers/anthropic/key_update_response.py rename to src/do_gradientai/types/model_providers/anthropic/key_update_response.py diff --git a/src/do_gradientai/types/providers/openai/__init__.py 
b/src/do_gradientai/types/model_providers/openai/__init__.py similarity index 100% rename from src/do_gradientai/types/providers/openai/__init__.py rename to src/do_gradientai/types/model_providers/openai/__init__.py diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/model_providers/openai/key_create_params.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_create_params.py rename to src/do_gradientai/types/model_providers/openai/key_create_params.py diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/model_providers/openai/key_create_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_create_response.py rename to src/do_gradientai/types/model_providers/openai/key_create_response.py diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/model_providers/openai/key_delete_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_delete_response.py rename to src/do_gradientai/types/model_providers/openai/key_delete_response.py diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/model_providers/openai/key_list_params.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_list_params.py rename to src/do_gradientai/types/model_providers/openai/key_list_params.py diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/model_providers/openai/key_list_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_list_response.py rename to src/do_gradientai/types/model_providers/openai/key_list_response.py diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py rename to src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py rename to src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_retrieve_response.py rename to src/do_gradientai/types/model_providers/openai/key_retrieve_response.py diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/model_providers/openai/key_update_params.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_update_params.py rename to src/do_gradientai/types/model_providers/openai/key_update_params.py diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/model_providers/openai/key_update_response.py similarity index 100% rename from src/do_gradientai/types/providers/openai/key_update_response.py rename to src/do_gradientai/types/model_providers/openai/key_update_response.py diff --git 
a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_routes.py similarity index 69% rename from tests/api_resources/agents/test_child_agents.py rename to tests/api_resources/agents/test_routes.py index c5108463..e2e85ab8 100644 --- a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/agents/test_routes.py @@ -10,31 +10,31 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI from do_gradientai.types.agents import ( - ChildAgentAddResponse, - ChildAgentViewResponse, - ChildAgentDeleteResponse, - ChildAgentUpdateResponse, + RouteAddResponse, + RouteViewResponse, + RouteDeleteResponse, + RouteUpdateResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestChildAgents: +class TestRoutes: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + route = client.agents.routes.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + route = client.agents.routes.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -43,33 +43,33 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: route_name="route_name", uuid="uuid", ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.update( + response = client.agents.routes.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.update( + with client.agents.routes.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteUpdateResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.update( + 
client.agents.routes.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.update( + client.agents.routes.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -93,37 +93,37 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.delete( + route = client.agents.routes.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + assert_matches_type(RouteDeleteResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.delete( + response = client.agents.routes.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteDeleteResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.delete( + with client.agents.routes.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteDeleteResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.agents.routes.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.agents.routes.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,16 +145,16 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + route = client.agents.routes.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + 
route = client.agents.routes.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -162,33 +162,33 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: body_parent_agent_uuid="parent_agent_uuid", route_name="route_name", ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_add(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.add( + response = client.agents.routes.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.add( + with client.agents.routes.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteAddResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.add( + client.agents.routes.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.add( + client.agents.routes.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,34 +212,34 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.view( + route = client.agents.routes.view( "uuid", ) - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + assert_matches_type(RouteViewResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.view( + response = client.agents.routes.with_raw_response.view( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteViewResponse, route, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.view( + with 
client.agents.routes.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + route = response.parse() + assert_matches_type(RouteViewResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -247,12 +247,12 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @parametrize def test_path_params_view(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.child_agents.with_raw_response.view( + client.agents.routes.with_raw_response.view( "", ) -class TestAsyncChildAgents: +class TestAsyncRoutes: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) @@ -260,16 +260,16 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + route = await async_client.agents.routes.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + route = await async_client.agents.routes.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -278,33 +278,33 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI route_name="route_name", uuid="uuid", ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.update( + response = await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteUpdateResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.update( + async with async_client.agents.routes.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteUpdateResponse, route, path=["response"]) assert cast(Any, 
response.is_closed) is True @@ -314,13 +314,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -328,37 +328,37 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.delete( + route = await async_client.agents.routes.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + assert_matches_type(RouteDeleteResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.delete( + response = await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteDeleteResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.delete( + async with async_client.agents.routes.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteDeleteResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -366,13 +366,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( + await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( + await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -380,16 +380,16 @@ async def 
test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.add( + route = await async_client.agents.routes.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.add( + route = await async_client.agents.routes.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -397,33 +397,33 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - body_parent_agent_uuid="parent_agent_uuid", route_name="route_name", ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.add( + response = await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteAddResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.add( + async with async_client.agents.routes.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteAddResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -433,13 +433,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.agents.child_agents.with_raw_response.add( + await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.add( + await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -447,34 +447,34 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_view(self, async_client: AsyncGradientAI) -> None: - child_agent = await 
async_client.agents.child_agents.view( + route = await async_client.agents.routes.view( "uuid", ) - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + assert_matches_type(RouteViewResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.view( + response = await async_client.agents.routes.with_raw_response.view( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteViewResponse, route, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.view( + async with async_client.agents.routes.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + route = await response.parse() + assert_matches_type(RouteViewResponse, route, path=["response"]) assert cast(Any, response.is_closed) is True @@ -482,6 +482,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.view( + await async_client.agents.routes.with_raw_response.view( "", ) diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/model_providers/__init__.py similarity index 100% rename from tests/api_resources/providers/__init__.py rename to tests/api_resources/model_providers/__init__.py diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/model_providers/anthropic/__init__.py similarity index 100% rename from tests/api_resources/providers/anthropic/__init__.py rename to tests/api_resources/model_providers/anthropic/__init__.py diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/model_providers/anthropic/test_keys.py similarity index 80% rename from tests/api_resources/providers/anthropic/test_keys.py rename to tests/api_resources/model_providers/anthropic/test_keys.py index 7aa595f7..b6ba0e9a 100644 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ b/tests/api_resources/model_providers/anthropic/test_keys.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.providers.anthropic import ( +from do_gradientai.types.model_providers.anthropic import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, @@ -27,13 +27,13 @@ class TestKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.create() + key = client.model_providers.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = 
client.providers.anthropic.keys.create( + key = client.model_providers.anthropic.keys.create( api_key="api_key", name="name", ) @@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.create() + response = client.model_providers.anthropic.keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.create() as response: + with client.model_providers.anthropic.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.retrieve( + key = client.model_providers.anthropic.keys.retrieve( "api_key_uuid", ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -72,7 +72,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.retrieve( + response = client.model_providers.anthropic.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.retrieve( + with client.model_providers.anthropic.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed @@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.retrieve( + client.model_providers.anthropic.keys.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.update( + key = client.model_providers.anthropic.keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) @@ -114,7 +114,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.update( + key = client.model_providers.anthropic.keys.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", @@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.update( + response = 
client.model_providers.anthropic.keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -137,7 +137,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.update( + with client.model_providers.anthropic.keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.update( + client.model_providers.anthropic.keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list() + key = client.model_providers.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list( + key = client.model_providers.anthropic.keys.list( page=0, per_page=0, ) @@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.list() + response = client.model_providers.anthropic.keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -184,7 +184,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.list() as response: + with client.model_providers.anthropic.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.delete( + key = client.model_providers.anthropic.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -204,7 +204,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.delete( + response = client.model_providers.anthropic.keys.with_raw_response.delete( "api_key_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.delete( + with client.model_providers.anthropic.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -231,14 +231,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, 
client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.delete( + client.model_providers.anthropic.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_list_agents(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list_agents( + key = client.model_providers.anthropic.keys.list_agents( uuid="uuid", ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) @@ -246,7 +246,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: - key = client.providers.anthropic.keys.list_agents( + key = client.model_providers.anthropic.keys.list_agents( uuid="uuid", page=0, per_page=0, @@ -256,7 +256,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list_agents(self, client: GradientAI) -> None: - response = client.providers.anthropic.keys.with_raw_response.list_agents( + response = client.model_providers.anthropic.keys.with_raw_response.list_agents( uuid="uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list_agents(self, client: GradientAI) -> None: - with client.providers.anthropic.keys.with_streaming_response.list_agents( + with client.model_providers.anthropic.keys.with_streaming_response.list_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -283,7 +283,7 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None: @parametrize def test_path_params_list_agents(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.list_agents( + client.model_providers.anthropic.keys.with_raw_response.list_agents( uuid="", ) @@ -296,13 +296,13 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.create() + key = await async_client.model_providers.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.create( + key = await async_client.model_providers.anthropic.keys.create( api_key="api_key", name="name", ) @@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.create() + response = await async_client.model_providers.anthropic.keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -321,7 +321,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.create() as response: + async with 
async_client.model_providers.anthropic.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -333,7 +333,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.retrieve( + key = await async_client.model_providers.anthropic.keys.retrieve( "api_key_uuid", ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -341,7 +341,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( + response = await async_client.model_providers.anthropic.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -353,7 +353,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( + async with async_client.model_providers.anthropic.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed @@ -368,14 +368,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.retrieve( + await async_client.model_providers.anthropic.keys.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.update( + key = await async_client.model_providers.anthropic.keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) @@ -383,7 +383,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.update( + key = await async_client.model_providers.anthropic.keys.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", @@ -394,7 +394,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.update( + response = await async_client.model_providers.anthropic.keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -406,7 +406,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.update( + async with async_client.model_providers.anthropic.keys.with_streaming_response.update( 
path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -421,20 +421,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.update( + await async_client.model_providers.anthropic.keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list() + key = await async_client.model_providers.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list( + key = await async_client.model_providers.anthropic.keys.list( page=0, per_page=0, ) @@ -443,7 +443,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list() + response = await async_client.model_providers.anthropic.keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -453,7 +453,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list() as response: + async with async_client.model_providers.anthropic.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -465,7 +465,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.delete( + key = await async_client.model_providers.anthropic.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -473,7 +473,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.delete( + response = await async_client.model_providers.anthropic.keys.with_raw_response.delete( "api_key_uuid", ) @@ -485,7 +485,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.delete( + async with async_client.model_providers.anthropic.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -500,14 +500,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, 
async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.delete( + await async_client.model_providers.anthropic.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list_agents( + key = await async_client.model_providers.anthropic.keys.list_agents( uuid="uuid", ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) @@ -515,7 +515,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.anthropic.keys.list_agents( + key = await async_client.model_providers.anthropic.keys.list_agents( uuid="uuid", page=0, per_page=0, @@ -525,7 +525,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( + response = await async_client.model_providers.anthropic.keys.with_raw_response.list_agents( uuid="uuid", ) @@ -537,7 +537,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( + async with async_client.model_providers.anthropic.keys.with_streaming_response.list_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -552,6 +552,6 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA @parametrize async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.list_agents( + await async_client.model_providers.anthropic.keys.with_raw_response.list_agents( uuid="", ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/model_providers/openai/__init__.py similarity index 100% rename from tests/api_resources/providers/openai/__init__.py rename to tests/api_resources/model_providers/openai/__init__.py diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/model_providers/openai/test_keys.py similarity index 81% rename from tests/api_resources/providers/openai/test_keys.py rename to tests/api_resources/model_providers/openai/test_keys.py index 714dc4bd..b398f5cc 100644 --- a/tests/api_resources/providers/openai/test_keys.py +++ b/tests/api_resources/model_providers/openai/test_keys.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.providers.openai import ( +from do_gradientai.types.model_providers.openai import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, @@ -27,13 +27,13 @@ class TestKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - key = client.providers.openai.keys.create() + key = 
client.model_providers.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.create( + key = client.model_providers.openai.keys.create( api_key="api_key", name="name", ) @@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.create() + response = client.model_providers.openai.keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.create() as response: + with client.model_providers.openai.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve( + key = client.model_providers.openai.keys.retrieve( "api_key_uuid", ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -72,7 +72,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve( + response = client.model_providers.openai.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve( + with client.model_providers.openai.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed @@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve( + client.model_providers.openai.keys.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - key = client.providers.openai.keys.update( + key = client.model_providers.openai.keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) @@ -114,7 +114,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.update( + key = client.model_providers.openai.keys.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", @@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def 
test_raw_response_update(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.update( + response = client.model_providers.openai.keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -137,7 +137,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.update( + with client.model_providers.openai.keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.update( + client.model_providers.openai.keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - key = client.providers.openai.keys.list() + key = client.model_providers.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.list( + key = client.model_providers.openai.keys.list( page=0, per_page=0, ) @@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.list() + response = client.model_providers.openai.keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -184,7 +184,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.list() as response: + with client.model_providers.openai.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - key = client.providers.openai.keys.delete( + key = client.model_providers.openai.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -204,7 +204,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.delete( + response = client.model_providers.openai.keys.with_raw_response.delete( "api_key_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.delete( + with client.model_providers.openai.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -231,14 +231,14 @@ def 
test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.delete( + client.model_providers.openai.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_retrieve_agents(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve_agents( + key = client.model_providers.openai.keys.retrieve_agents( uuid="uuid", ) assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) @@ -246,7 +246,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: - key = client.providers.openai.keys.retrieve_agents( + key = client.model_providers.openai.keys.retrieve_agents( uuid="uuid", page=0, per_page=0, @@ -256,7 +256,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non @pytest.mark.skip() @parametrize def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve_agents( + response = client.model_providers.openai.keys.with_raw_response.retrieve_agents( uuid="uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve_agents( + with client.model_providers.openai.keys.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -283,7 +283,7 @@ def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve_agents(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve_agents( + client.model_providers.openai.keys.with_raw_response.retrieve_agents( uuid="", ) @@ -296,13 +296,13 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.create() + key = await async_client.model_providers.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.create( + key = await async_client.model_providers.openai.keys.create( api_key="api_key", name="name", ) @@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.create() + response = await async_client.model_providers.openai.keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -321,7 +321,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: 
AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.create() as response: + async with async_client.model_providers.openai.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -333,7 +333,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve( + key = await async_client.model_providers.openai.keys.retrieve( "api_key_uuid", ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -341,7 +341,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve( + response = await async_client.model_providers.openai.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -353,7 +353,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve( + async with async_client.model_providers.openai.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed @@ -368,14 +368,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve( + await async_client.model_providers.openai.keys.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.update( + key = await async_client.model_providers.openai.keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) @@ -383,7 +383,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.update( + key = await async_client.model_providers.openai.keys.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", @@ -394,7 +394,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.update( + response = await async_client.model_providers.openai.keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -406,7 +406,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.update( + async with 
async_client.model_providers.openai.keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -421,20 +421,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.update( + await async_client.model_providers.openai.keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.list() + key = await async_client.model_providers.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.list( + key = await async_client.model_providers.openai.keys.list( page=0, per_page=0, ) @@ -443,7 +443,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.list() + response = await async_client.model_providers.openai.keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -453,7 +453,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.list() as response: + async with async_client.model_providers.openai.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -465,7 +465,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.delete( + key = await async_client.model_providers.openai.keys.delete( "api_key_uuid", ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -473,7 +473,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.delete( + response = await async_client.model_providers.openai.keys.with_raw_response.delete( "api_key_uuid", ) @@ -485,7 +485,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.delete( + async with async_client.model_providers.openai.keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -500,14 +500,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def 
test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.delete( + await async_client.model_providers.openai.keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( + key = await async_client.model_providers.openai.keys.retrieve_agents( uuid="uuid", ) assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) @@ -515,7 +515,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No @pytest.mark.skip() @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( + key = await async_client.model_providers.openai.keys.retrieve_agents( uuid="uuid", page=0, per_page=0, @@ -525,7 +525,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + response = await async_client.model_providers.openai.keys.with_raw_response.retrieve_agents( uuid="uuid", ) @@ -537,7 +537,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( + async with async_client.model_providers.openai.keys.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed @@ -552,6 +552,6 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradi @parametrize async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + await async_client.model_providers.openai.keys.with_raw_response.retrieve_agents( uuid="", ) From 8a3581362f7708a3129ea908c681d916cccfe7d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 06:44:20 +0000 Subject: [PATCH 072/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 49 +++++++++++++++++++++++++++++++++++++------------ 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/.stats.yml b/.stats.yml index d58c3c34..4fb13307 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a5bfbbd032355b26ddd41d659c93495b +config_hash: fc55dd4870b7f5b1f319fffe9a0c5b74 diff --git a/README.md b/README.md index 87682600..54395c64 100644 --- a/README.md +++ b/README.md @@ -31,10 +31,16 @@ client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) -versions = 
client.agents.versions.list( - uuid="REPLACE_ME", +completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", ) -print(versions.agent_versions) +print(completion.id) ``` While you can provide an `api_key` keyword argument, @@ -57,10 +63,16 @@ client = AsyncGradientAI( async def main() -> None: - versions = await client.agents.versions.list( - uuid="REPLACE_ME", + completion = await client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", ) - print(versions.agent_versions) + print(completion.id) asyncio.run(main()) @@ -93,10 +105,16 @@ async def main() -> None: api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: - versions = await client.agents.versions.list( - uuid="REPLACE_ME", + completion = await client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", ) - print(versions.agent_versions) + print(completion.id) asyncio.run(main()) @@ -120,10 +138,17 @@ from do_gradientai import GradientAI client = GradientAI() -evaluation_test_case = client.agents.evaluation_test_cases.create( - star_metric={}, +completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream_options={}, ) -print(evaluation_test_case.star_metric) +print(completion.stream_options) ``` ## Handling errors From 8409a271767af75952f7052c4664004c98e0b0c9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 07:35:42 +0000 Subject: [PATCH 073/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- src/do_gradientai/_client.py | 65 +++++++-- tests/conftest.py | 11 +- tests/test_client.py | 269 ++++++++++++++++++++++++++++------- 4 files changed, 280 insertions(+), 67 deletions(-) diff --git a/.stats.yml b/.stats.yml index 4fb13307..1246506f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: fc55dd4870b7f5b1f319fffe9a0c5b74 +config_hash: dd2b5f3f77ea08c6062115a56c3367ee diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index a57125ee..4dc1c952 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -13,6 +13,7 @@ from ._types import ( NOT_GIVEN, Omit, + Headers, Timeout, NotGiven, Transport, @@ -23,7 +24,7 @@ from ._compat import cached_property from ._version import __version__ from ._streaming import Stream as Stream, AsyncStream as AsyncStream -from ._exceptions import APIStatusError, GradientAIError +from ._exceptions import APIStatusError from ._base_client import ( DEFAULT_MAX_RETRIES, SyncAPIClient, @@ -54,12 +55,14 @@ class GradientAI(SyncAPIClient): # client options - api_key: str + api_key: str | None + inference_key: str | None def __init__( self, *, api_key: str | None = None, + inference_key: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -81,16 +84,18 @@ def __init__( ) -> None: """Construct a new synchronous 
GradientAI client instance. - This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. + This automatically infers the following arguments from their corresponding environment variables if they are not provided: + - `api_key` from `GRADIENTAI_API_KEY` + - `inference_key` from `GRADIENTAI_API_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") - if api_key is None: - raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" - ) self.api_key = api_key + if inference_key is None: + inference_key = os.environ.get("GRADIENTAI_API_KEY") + self.inference_key = inference_key + if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") self._base_url_overridden = base_url is not None @@ -167,6 +172,8 @@ def qs(self) -> Querystring: @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key + if api_key is None: + return {} return {"Authorization": f"Bearer {api_key}"} @property @@ -178,10 +185,22 @@ def default_headers(self) -> dict[str, str | Omit]: **self._custom_headers, } + @override + def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.api_key and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + raise TypeError( + '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"' + ) + def copy( self, *, api_key: str | None = None, + inference_key: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -216,6 +235,7 @@ def copy( http_client = http_client or self._client client = self.__class__( api_key=api_key or self.api_key, + inference_key=inference_key or self.inference_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -267,12 +287,14 @@ def _make_status_error( class AsyncGradientAI(AsyncAPIClient): # client options - api_key: str + api_key: str | None + inference_key: str | None def __init__( self, *, api_key: str | None = None, + inference_key: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -294,16 +316,18 @@ def __init__( ) -> None: """Construct a new async AsyncGradientAI client instance. - This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. 
+ This automatically infers the following arguments from their corresponding environment variables if they are not provided: + - `api_key` from `GRADIENTAI_API_KEY` + - `inference_key` from `GRADIENTAI_API_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") - if api_key is None: - raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" - ) self.api_key = api_key + if inference_key is None: + inference_key = os.environ.get("GRADIENTAI_API_KEY") + self.inference_key = inference_key + if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") self._base_url_overridden = base_url is not None @@ -380,6 +404,8 @@ def qs(self) -> Querystring: @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key + if api_key is None: + return {} return {"Authorization": f"Bearer {api_key}"} @property @@ -391,10 +417,22 @@ def default_headers(self) -> dict[str, str | Omit]: **self._custom_headers, } + @override + def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.api_key and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + raise TypeError( + '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"' + ) + def copy( self, *, api_key: str | None = None, + inference_key: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -429,6 +467,7 @@ def copy( http_client = http_client or self._client client = self.__class__( api_key=api_key or self.api_key, + inference_key=inference_key or self.inference_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, diff --git a/tests/conftest.py b/tests/conftest.py index daa5b955..6048de1a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -46,6 +46,7 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") api_key = "My API Key" +inference_key = "My Inference Key" @pytest.fixture(scope="session") @@ -54,7 +55,9 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]: if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - with GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + with GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=strict + ) as client: yield client @@ -79,6 +82,10 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") async with AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=strict, + http_client=http_client, ) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index 920275ae..44dbc938 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -24,7 +24,7 @@ from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError from 
do_gradientai._types import Omit from do_gradientai._models import BaseModel, FinalRequestOptions -from do_gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError +from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from do_gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, @@ -38,6 +38,7 @@ base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") api_key = "My API Key" +inference_key = "My Inference Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: @@ -59,7 +60,9 @@ def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int: class TestGradientAI: - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: @@ -89,6 +92,10 @@ def test_copy(self) -> None: assert copied.api_key == "another My API Key" assert self.client.api_key == "My API Key" + copied = self.client.copy(inference_key="another My Inference Key") + assert copied.inference_key == "another My Inference Key" + assert self.client.inference_key == "My Inference Key" + def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -107,7 +114,11 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, ) assert client.default_headers["X-Foo"] == "bar" @@ -141,7 +152,11 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_query={"foo": "bar"}, ) assert _get_params(client)["foo"] == "bar" @@ -267,7 +282,11 @@ def test_request_timeout(self) -> None: def test_client_timeout_option(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + timeout=httpx.Timeout(0), ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -278,7 +297,11 @@ def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -288,7 +311,11 @@ def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: client = GradientAI( - base_url=base_url, api_key=api_key, 
_strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -298,7 +325,11 @@ def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -311,13 +342,18 @@ async def test_invalid_http_client(self) -> None: GradientAI( base_url=base_url, api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) def test_default_headers_option(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" @@ -326,6 +362,7 @@ def test_default_headers_option(self) -> None: client2 = GradientAI( base_url=base_url, api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -337,18 +374,35 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(GradientAIError): - with update_env(**{"GRADIENTAI_API_KEY": Omit()}): - client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) - _ = client2 + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): + client2 = GradientAI( + base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True + ) + + with pytest.raises( + TypeError, + match="Could not resolve authentication method. Expected the api_key to be set. 
Or for the `Authorization` headers to be explicitly omitted", + ): + client2._build_request(FinalRequestOptions(method="get", url="/foo")) + + request2 = client2._build_request( + FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()}) + ) + assert request2.headers.get("Authorization") is None def test_default_query_option(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_query={"query_param": "bar"}, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) @@ -548,7 +602,12 @@ class Model(BaseModel): assert response.foo == 2 def test_base_url_setter(self) -> None: - client = GradientAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url="https://example.com/from_init", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + ) assert client.base_url == "https://example.com/from_init/" client.base_url = "https://example.com/from_setter" # type: ignore[assignment] @@ -557,18 +616,22 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): - client = GradientAI(api_key=api_key, _strict_response_validation=True) + client = GradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ GradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -589,11 +652,15 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None: "client", [ GradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -614,11 +681,15 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None: "client", [ GradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -636,7 +707,9 @@ def test_absolute_request_url(self, client: GradientAI) -> None: assert request.url == "https://myapi.com/foo" def test_copied_client_does_not_close_http(self) -> None: - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url=base_url, api_key=api_key, 
inference_key=inference_key, _strict_response_validation=True + ) assert not client.is_closed() copied = client.copy() @@ -647,7 +720,9 @@ def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() def test_client_context_manager(self) -> None: - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) with client as c2: assert c2 is client assert not c2.is_closed() @@ -669,7 +744,11 @@ class Model(BaseModel): def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): GradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + max_retries=cast(Any, None), ) @pytest.mark.respx(base_url=base_url) @@ -679,12 +758,16 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False + ) response = client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -712,7 +795,9 @@ class Model(BaseModel): ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) @@ -870,7 +955,9 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: class TestAsyncGradientAI: - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @@ -902,6 +989,10 @@ def test_copy(self) -> None: assert copied.api_key == "another My API Key" assert self.client.api_key == "My API Key" + copied = self.client.copy(inference_key="another My Inference Key") + assert copied.inference_key == "another My Inference Key" + assert self.client.inference_key == "My Inference Key" + def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -920,7 +1011,11 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + 
_strict_response_validation=True, + default_headers={"X-Foo": "bar"}, ) assert client.default_headers["X-Foo"] == "bar" @@ -954,7 +1049,11 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_query={"foo": "bar"}, ) assert _get_params(client)["foo"] == "bar" @@ -1080,7 +1179,11 @@ async def test_request_timeout(self) -> None: async def test_client_timeout_option(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + timeout=httpx.Timeout(0), ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1091,7 +1194,11 @@ async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1101,7 +1208,11 @@ async def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1111,7 +1222,11 @@ async def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + http_client=http_client, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1124,13 +1239,18 @@ def test_invalid_http_client(self) -> None: AsyncGradientAI( base_url=base_url, api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) def test_default_headers_option(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_headers={"X-Foo": "bar"}, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" @@ -1139,6 +1259,7 @@ def test_default_headers_option(self) -> None: client2 = AsyncGradientAI( base_url=base_url, api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ 
-1150,18 +1271,35 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(GradientAIError): - with update_env(**{"GRADIENTAI_API_KEY": Omit()}): - client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) - _ = client2 + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): + client2 = AsyncGradientAI( + base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True + ) + + with pytest.raises( + TypeError, + match="Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted", + ): + client2._build_request(FinalRequestOptions(method="get", url="/foo")) + + request2 = client2._build_request( + FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()}) + ) + assert request2.headers.get("Authorization") is None def test_default_query_option(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + default_query={"query_param": "bar"}, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) @@ -1362,7 +1500,10 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = AsyncGradientAI( - base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True + base_url="https://example.com/from_init", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -1372,18 +1513,22 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): - client = AsyncGradientAI(api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ AsyncGradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1404,11 +1549,15 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None: "client", [ AsyncGradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + 
inference_key=inference_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1429,11 +1578,15 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None: "client", [ AsyncGradientAI( - base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True + base_url="http://localhost:5000/custom/path/", + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, + inference_key=inference_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1451,7 +1604,9 @@ def test_absolute_request_url(self, client: AsyncGradientAI) -> None: assert request.url == "https://myapi.com/foo" async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) assert not client.is_closed() copied = client.copy() @@ -1463,7 +1618,9 @@ async def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) async with client as c2: assert c2 is client assert not c2.is_closed() @@ -1486,7 +1643,11 @@ class Model(BaseModel): async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): AsyncGradientAI( - base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + _strict_response_validation=True, + max_retries=cast(Any, None), ) @pytest.mark.respx(base_url=base_url) @@ -1497,12 +1658,16 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False + ) response = await client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -1531,7 +1696,9 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI( + base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + ) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) From 1de9304c4201b297c22a434ebaeb6e80d6a384ee Mon Sep 17 00:00:00 
2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 07:36:05 +0000 Subject: [PATCH 074/200] feat(api): update via SDK Studio --- .stats.yml | 2 +- src/do_gradientai/_client.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index 1246506f..d96a651b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: dd2b5f3f77ea08c6062115a56c3367ee +config_hash: a1224bdbf22a97483c0e2d813b24423c diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 4dc1c952..0020ed16 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -86,14 +86,14 @@ def __init__( This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `GRADIENTAI_API_KEY` - - `inference_key` from `GRADIENTAI_API_KEY` + - `inference_key` from `GRADIENTAI_INFERENCE_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENTAI_API_KEY") + inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key if base_url is None: @@ -318,14 +318,14 @@ def __init__( This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `GRADIENTAI_API_KEY` - - `inference_key` from `GRADIENTAI_API_KEY` + - `inference_key` from `GRADIENTAI_INFERENCE_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENTAI_API_KEY") + inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key if base_url is None: From 3f935404dc5a76c85cd0bc8c45ad035e6c80ed90 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 07:36:29 +0000 Subject: [PATCH 075/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d96a651b..a3b45817 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a1224bdbf22a97483c0e2d813b24423c +config_hash: 318e79d212eb460dc120bed99c778b1e From c5d8f1199964bd42d6b3cf228b9b5700386b20bc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 07:36:52 +0000 Subject: [PATCH 076/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index a3b45817..24272762 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 
318e79d212eb460dc120bed99c778b1e +config_hash: 7fec1a24eb493bd03fc0375fbbd5e5a7 From 11fb56bdf47d9f11ef3b861a1df8aa09357bf74e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 14:00:26 +0000 Subject: [PATCH 077/200] feat(api): define api links and meta as shared models --- .stats.yml | 2 +- api.md | 8 +++++++- src/do_gradientai/types/__init__.py | 1 + src/do_gradientai/types/agent_list_response.py | 4 ++-- src/do_gradientai/types/agents/__init__.py | 2 -- src/do_gradientai/types/agents/api_key_list_response.py | 4 ++-- src/do_gradientai/types/agents/version_list_response.py | 4 ++-- .../types/inference/api_key_list_response.py | 4 ++-- src/do_gradientai/types/knowledge_base_list_response.py | 4 ++-- .../types/knowledge_bases/data_source_list_response.py | 4 ++-- .../types/knowledge_bases/indexing_job_list_response.py | 4 ++-- src/do_gradientai/types/model_list_response.py | 4 ++-- .../model_providers/anthropic/key_list_agents_response.py | 4 ++-- .../types/model_providers/anthropic/key_list_response.py | 4 ++-- .../types/model_providers/openai/key_list_response.py | 4 ++-- .../openai/key_retrieve_agents_response.py | 4 ++-- src/do_gradientai/types/shared/__init__.py | 4 ++++ src/do_gradientai/types/{agents => shared}/api_links.py | 0 src/do_gradientai/types/{agents => shared}/api_meta.py | 0 19 files changed, 37 insertions(+), 28 deletions(-) create mode 100644 src/do_gradientai/types/shared/__init__.py rename src/do_gradientai/types/{agents => shared}/api_links.py (100%) rename src/do_gradientai/types/{agents => shared}/api_meta.py (100%) diff --git a/.stats.yml b/.stats.yml index 24272762..876bab8e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 67 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 7fec1a24eb493bd03fc0375fbbd5e5a7 +config_hash: 70cce9f06a7f98292ef13598418ed48d diff --git a/api.md b/api.md index 686761f9..3bce144e 100644 --- a/api.md +++ b/api.md @@ -1,3 +1,9 @@ +# Shared Types + +```python +from do_gradientai.types import APILinks, APIMeta +``` + # Agents Types: @@ -149,7 +155,7 @@ Methods: Types: ```python -from do_gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse +from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse ``` Methods: diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index dde7f848..23cf1802 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .shared import APIMeta as APIMeta, APILinks as APILinks from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement diff --git a/src/do_gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py index 97c0f0d5..397d9fd2 100644 --- a/src/do_gradientai/types/agent_list_response.py +++ b/src/do_gradientai/types/agent_list_response.py @@ -5,9 +5,9 @@ from typing_extensions import Literal from .._models import BaseModel -from .agents.api_meta import APIMeta from .api_agent_model import APIAgentModel -from .agents.api_links import APILinks +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks 
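# A minimal consumption sketch for the shared pagination models (illustrative,
# not part of the generated module; assumes GRADIENTAI_API_KEY is set and that
# list responses expose `meta`/`links` per the imports above -- the printed
# field names are assumptions):
from do_gradientai import GradientAI

client = GradientAI()
resp = client.agents.list()
if resp.meta is not None:
    # APIMeta carries pagination counters when the API returns them (assumed fields)
    print(resp.meta.page, resp.meta.pages, resp.meta.total)
if resp.links is not None:
    # APILinks carries links to the first/last/next/previous pages
    print(resp.links)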
from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod from .api_deployment_visibility import APIDeploymentVisibility diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py index 1dd18511..0060e12c 100644 --- a/src/do_gradientai/types/agents/__init__.py +++ b/src/do_gradientai/types/agents/__init__.py @@ -2,8 +2,6 @@ from __future__ import annotations -from .api_meta import APIMeta as APIMeta -from .api_links import APILinks as APILinks from .api_star_metric import APIStarMetric as APIStarMetric from .route_add_params import RouteAddParams as RouteAddParams from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py index eff98649..aedb88ca 100644 --- a/src/do_gradientai/types/agents/api_key_list_response.py +++ b/src/do_gradientai/types/agents/api_key_list_response.py @@ -2,9 +2,9 @@ from typing import List, Optional -from .api_meta import APIMeta from ..._models import BaseModel -from .api_links import APILinks +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from ..api_agent_api_key_info import APIAgentAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py index 1f3c3d69..af25150e 100644 --- a/src/do_gradientai/types/agents/version_list_response.py +++ b/src/do_gradientai/types/agents/version_list_response.py @@ -5,9 +5,9 @@ from pydantic import Field as FieldInfo -from .api_meta import APIMeta from ..._models import BaseModel -from .api_links import APILinks +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from ..api_retrieval_method import APIRetrievalMethod __all__ = [ diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py index 535e2f96..3e937950 100644 --- a/src/do_gradientai/types/inference/api_key_list_response.py +++ b/src/do_gradientai/types/inference/api_key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/do_gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py index 09ca1ad3..e8998b51 100644 --- a/src/do_gradientai/types/knowledge_base_list_response.py +++ b/src/do_gradientai/types/knowledge_base_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase __all__ = ["KnowledgeBaseListResponse"] diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py index 78246ce1..2e5fc517 100644 --- a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import 
BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource __all__ = ["DataSourceListResponse"] diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py index 4784c1a1..deea4562 100644 --- a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from ..._models import BaseModel -from ..agents.api_meta import APIMeta +from ..shared.api_meta import APIMeta from .api_indexing_job import APIIndexingJob -from ..agents.api_links import APILinks +from ..shared.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py index e6f5fad5..47651759 100644 --- a/src/do_gradientai/types/model_list_response.py +++ b/src/do_gradientai/types/model_list_response.py @@ -4,8 +4,8 @@ from .._models import BaseModel from .api_model import APIModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks __all__ = ["ModelListResponse"] diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py index ba6ca946..c9e74cf7 100644 --- a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py +++ b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks __all__ = ["KeyListAgentsResponse"] diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py index d0b84e96..e3e3e5ef 100644 --- a/src/do_gradientai/types/model_providers/anthropic/key_list_response.py +++ b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/do_gradientai/types/model_providers/openai/key_list_response.py b/src/do_gradientai/types/model_providers/openai/key_list_response.py index c263cba3..362b5dd6 100644 --- a/src/do_gradientai/types/model_providers/openai/key_list_response.py +++ b/src/do_gradientai/types/model_providers/openai/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyListResponse"] diff --git 
a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py index f42edea6..56808bac 100644 --- a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py +++ b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks __all__ = ["KeyRetrieveAgentsResponse"] diff --git a/src/do_gradientai/types/shared/__init__.py b/src/do_gradientai/types/shared/__init__.py new file mode 100644 index 00000000..5f02d62f --- /dev/null +++ b/src/do_gradientai/types/shared/__init__.py @@ -0,0 +1,4 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .api_meta import APIMeta as APIMeta +from .api_links import APILinks as APILinks diff --git a/src/do_gradientai/types/agents/api_links.py b/src/do_gradientai/types/shared/api_links.py similarity index 100% rename from src/do_gradientai/types/agents/api_links.py rename to src/do_gradientai/types/shared/api_links.py diff --git a/src/do_gradientai/types/agents/api_meta.py b/src/do_gradientai/types/shared/api_meta.py similarity index 100% rename from src/do_gradientai/types/agents/api_meta.py rename to src/do_gradientai/types/shared/api_meta.py From b7cbbaa52cb8872bcb0219656fd1ad44ee5d0495 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 19:14:35 +0000 Subject: [PATCH 078/200] feat(api): update OpenAI spec and add endpoints/models --- .stats.yml | 8 +- api.md | 52 +- src/do_gradientai/resources/agents/agents.py | 16 +- .../agents/evaluation_metrics/__init__.py | 33 + .../evaluation_metrics.py | 44 +- .../evaluation_metrics/workspaces/__init__.py | 33 + .../evaluation_metrics/workspaces/agents.py | 324 +++++++++ .../workspaces/workspaces.py | 654 ++++++++++++++++++ .../resources/agents/evaluation_runs.py | 105 ++- .../resources/agents/evaluation_test_cases.py | 25 +- src/do_gradientai/resources/models.py | 182 +++-- src/do_gradientai/types/__init__.py | 3 +- src/do_gradientai/types/agents/__init__.py | 4 + .../types/agents/api_evaluation_run.py | 12 + .../agents/evaluation_metrics/__init__.py | 14 + .../workspace_create_params.py | 16 + .../workspace_create_response.py | 16 + .../workspace_delete_response.py | 11 + ...ace_list_evaluation_test_cases_response.py | 12 + .../workspace_list_response.py | 16 + .../workspace_retrieve_response.py | 16 + .../workspace_update_params.py | 18 + .../workspace_update_response.py | 16 + .../evaluation_metrics/workspaces/__init__.py | 8 + .../workspaces/agent_list_params.py | 26 + .../workspaces/agent_list_response.py | 22 + .../workspaces/agent_move_params.py | 16 + .../workspaces/agent_move_response.py | 16 + .../agents/evaluation_run_create_params.py | 5 +- .../agents/evaluation_run_create_response.py | 4 +- ...valuation_run_retrieve_results_response.py | 12 + .../evaluation_test_case_retrieve_params.py | 12 + src/do_gradientai/types/api_model.py | 32 - .../types/knowledge_bases/__init__.py | 1 + .../api_indexed_data_source.py | 48 ++ .../types/knowledge_bases/api_indexing_job.py | 12 + .../api_knowledge_base_data_source.py | 3 + ...xing_job_retrieve_data_sources_response.py | 46 +- src/do_gradientai/types/model.py | 21 +
src/do_gradientai/types/model_list_params.py | 42 -- .../types/model_list_response.py | 13 +- .../agents/evaluation_metrics/__init__.py | 1 + .../evaluation_metrics/test_workspaces.py | 521 ++++++++++++++ .../evaluation_metrics/workspaces/__init__.py | 1 + .../workspaces/test_agents.py | 239 +++++++ .../agents/test_evaluation_runs.py | 97 ++- .../agents/test_evaluation_test_cases.py | 34 +- tests/api_resources/test_models.py | 100 ++- 48 files changed, 2667 insertions(+), 295 deletions(-) create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/__init__.py rename src/do_gradientai/resources/agents/{ => evaluation_metrics}/evaluation_metrics.py (77%) create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/__init__.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py delete mode 100644 src/do_gradientai/types/api_model.py create mode 100644 src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py create mode 100644 src/do_gradientai/types/model.py delete mode 100644 src/do_gradientai/types/model_list_params.py create mode 100644 tests/api_resources/agents/evaluation_metrics/__init__.py create mode 100644 tests/api_resources/agents/evaluation_metrics/test_workspaces.py create mode 100644 tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py create mode 100644 tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py diff --git a/.stats.yml b/.stats.yml index 876bab8e..291ef184 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 67 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml -openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 70cce9f06a7f98292ef13598418ed48d 
+configured_endpoints: 77 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml +openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 +config_hash: ecf128ea21a8fead9dabb9609c4dbce8 diff --git a/api.md b/api.md index 3bce144e..78a81061 100644 --- a/api.md +++ b/api.md @@ -68,7 +68,47 @@ from do_gradientai.types.agents import EvaluationMetricListResponse Methods: -- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse + +### Workspaces + +Types: + +```python +from do_gradientai.types.agents.evaluation_metrics import ( + WorkspaceCreateResponse, + WorkspaceRetrieveResponse, + WorkspaceUpdateResponse, + WorkspaceListResponse, + WorkspaceDeleteResponse, + WorkspaceListEvaluationTestCasesResponse, +) +``` + +Methods: + +- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse +- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse +- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse +- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse +- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse +- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse + +#### Agents + +Types: + +```python +from do_gradientai.types.agents.evaluation_metrics.workspaces import ( + AgentListResponse, + AgentMoveResponse, +) +``` + +Methods: + +- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse +- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse ## EvaluationRuns @@ -83,6 +123,7 @@ from do_gradientai.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, + EvaluationRunRetrieveResultsResponse, ) ``` @@ -91,6 +132,7 @@ Methods: - client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse - client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse - client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases @@ -111,7 +153,7 @@ from do_gradientai.types.agents import ( Methods: - client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse -- client.agents.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse +- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse - client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse - client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse - client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse @@ -315,6 +357,7 @@ Types: ```python from do_gradientai.types.knowledge_bases import ( + APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -376,9 +419,10 @@ Methods: 
Types: ```python -from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse +from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, Model, ModelListResponse ``` Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.retrieve(model) -> Model +- client.models.list() -> ModelListResponse diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py index 5762139d..0a6e183c 100644 --- a/src/do_gradientai/resources/agents/agents.py +++ b/src/do_gradientai/resources/agents/agents.py @@ -73,14 +73,6 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) -from .evaluation_metrics import ( - EvaluationMetricsResource, - AsyncEvaluationMetricsResource, - EvaluationMetricsResourceWithRawResponse, - AsyncEvaluationMetricsResourceWithRawResponse, - EvaluationMetricsResourceWithStreamingResponse, - AsyncEvaluationMetricsResourceWithStreamingResponse, -) from .evaluation_datasets import ( EvaluationDatasetsResource, AsyncEvaluationDatasetsResource, @@ -105,6 +97,14 @@ from ...types.agent_retrieve_response import AgentRetrieveResponse from ...types.api_deployment_visibility import APIDeploymentVisibility from ...types.agent_update_status_response import AgentUpdateStatusResponse +from .evaluation_metrics.evaluation_metrics import ( + EvaluationMetricsResource, + AsyncEvaluationMetricsResource, + EvaluationMetricsResourceWithRawResponse, + AsyncEvaluationMetricsResourceWithRawResponse, + EvaluationMetricsResourceWithStreamingResponse, + AsyncEvaluationMetricsResourceWithStreamingResponse, +) __all__ = ["AgentsResource", "AsyncAgentsResource"] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py new file mode 100644 index 00000000..1c0ec1ea --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
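# A short sketch of the reworked models surface described in the api.md diff
# above (illustrative; assumes a configured client and a hypothetical model ID):
from do_gradientai import GradientAI

client = GradientAI()
model = client.models.retrieve("llama3-8b-instruct")  # hypothetical model ID
print(model)
models = client.models.list()  # list() no longer accepts filter params here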
+ +from .workspaces import ( + WorkspacesResource, + AsyncWorkspacesResource, + WorkspacesResourceWithRawResponse, + AsyncWorkspacesResourceWithRawResponse, + WorkspacesResourceWithStreamingResponse, + AsyncWorkspacesResourceWithStreamingResponse, +) +from .evaluation_metrics import ( + EvaluationMetricsResource, + AsyncEvaluationMetricsResource, + EvaluationMetricsResourceWithRawResponse, + AsyncEvaluationMetricsResourceWithRawResponse, + EvaluationMetricsResourceWithStreamingResponse, + AsyncEvaluationMetricsResourceWithStreamingResponse, +) + +__all__ = [ + "WorkspacesResource", + "AsyncWorkspacesResource", + "WorkspacesResourceWithRawResponse", + "AsyncWorkspacesResourceWithRawResponse", + "WorkspacesResourceWithStreamingResponse", + "AsyncWorkspacesResourceWithStreamingResponse", + "EvaluationMetricsResource", + "AsyncEvaluationMetricsResource", + "EvaluationMetricsResourceWithRawResponse", + "AsyncEvaluationMetricsResourceWithRawResponse", + "EvaluationMetricsResourceWithStreamingResponse", + "AsyncEvaluationMetricsResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 77% rename from src/do_gradientai/resources/agents/evaluation_metrics.py rename to src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py index c554be3e..ce549527 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics.py +++ b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -4,22 +4,34 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.agents.evaluation_metric_list_response import EvaluationMetricListResponse +from ...._base_client import make_request_options +from .workspaces.workspaces import ( + WorkspacesResource, + AsyncWorkspacesResource, + WorkspacesResourceWithRawResponse, + AsyncWorkspacesResourceWithRawResponse, + WorkspacesResourceWithStreamingResponse, + AsyncWorkspacesResourceWithStreamingResponse, +) +from ....types.agents.evaluation_metric_list_response import EvaluationMetricListResponse __all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"] class EvaluationMetricsResource(SyncAPIResource): + @cached_property + def workspaces(self) -> WorkspacesResource: + return WorkspacesResource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -65,6 +77,10 @@ def list( class AsyncEvaluationMetricsResource(AsyncAPIResource): + @cached_property + def workspaces(self) -> AsyncWorkspacesResource: + return AsyncWorkspacesResource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -117,6 +133,10 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: evaluation_metrics.list, ) + @cached_property + def workspaces(self) -> WorkspacesResourceWithRawResponse: + return 
WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -126,6 +146,10 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: evaluation_metrics.list, ) + @cached_property + def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse: + return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -135,6 +159,10 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: evaluation_metrics.list, ) + @cached_property + def workspaces(self) -> WorkspacesResourceWithStreamingResponse: + return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -143,3 +171,7 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_streamed_response_wrapper( evaluation_metrics.list, ) + + @cached_property + def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse: + return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py new file mode 100644 index 00000000..79d75f90 --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from .workspaces import ( + WorkspacesResource, + AsyncWorkspacesResource, + WorkspacesResourceWithRawResponse, + AsyncWorkspacesResourceWithRawResponse, + WorkspacesResourceWithStreamingResponse, + AsyncWorkspacesResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "WorkspacesResource", + "AsyncWorkspacesResource", + "WorkspacesResourceWithRawResponse", + "AsyncWorkspacesResourceWithRawResponse", + "WorkspacesResourceWithStreamingResponse", + "AsyncWorkspacesResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py new file mode 100644 index 00000000..1e11739f --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py @@ -0,0 +1,324 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
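# How the new nesting is reached from a client (illustrative; each hop is a
# cached_property, so repeated attribute access reuses one resource instance
# bound to the same client):
from do_gradientai import GradientAI

client = GradientAI()
workspaces = client.agents.evaluation_metrics.workspaces              # WorkspacesResource
ws_agents = client.agents.evaluation_metrics.workspaces.agents        # workspace-scoped agents
raw = client.agents.evaluation_metrics.with_raw_response.workspaces   # raw-response variant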
+ +from __future__ import annotations + +from typing import List + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics.workspaces import agent_list_params, agent_move_params +from .....types.agents.evaluation_metrics.workspaces.agent_list_response import AgentListResponse +from .....types.agents.evaluation_metrics.workspaces.agent_move_response import AgentMoveResponse + +__all__ = ["AgentsResource", "AsyncAgentsResource"] + + +class AgentsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AgentsResourceWithStreamingResponse(self) + + def list( + self, + workspace_uuid: str, + *, + field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents by a Workspace, send a GET request to + `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. + + Args: + only_deployed: Only list agents that are deployed. + + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not workspace_uuid: + raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}") + return self._get( + f"/v2/gen-ai/workspaces/{workspace_uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "field_mask": field_mask, + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + + def move( + self, + path_workspace_uuid: str, + *, + agent_uuids: List[str] | NotGiven = NOT_GIVEN, + body_workspace_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentMoveResponse: + """ + To move all listed agents to a given workspace, send a PUT request to + `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_workspace_uuid: + raise ValueError( + f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}" + ) + return self._put( + f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents", + body=maybe_transform( + { + "agent_uuids": agent_uuids, + "body_workspace_uuid": body_workspace_uuid, + }, + agent_move_params.AgentMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentMoveResponse, + ) + + +class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAgentsResourceWithStreamingResponse(self) + + async def list( + self, + workspace_uuid: str, + *, + field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents by a Workspace, send a GET request to + `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. + + Args: + only_deployed: Only list agents that are deployed. + + page: page number. + + per_page: items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not workspace_uuid: + raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}") + return await self._get( + f"/v2/gen-ai/workspaces/{workspace_uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "field_mask": field_mask, + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + + async def move( + self, + path_workspace_uuid: str, + *, + agent_uuids: List[str] | NotGiven = NOT_GIVEN, + body_workspace_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentMoveResponse: + """ + To move all listed agents to a given workspace, send a PUT request to + `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_workspace_uuid: + raise ValueError( + f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}" + ) + return await self._put( + f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents", + body=await async_maybe_transform( + { + "agent_uuids": agent_uuids, + "body_workspace_uuid": body_workspace_uuid, + }, + agent_move_params.AgentMoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentMoveResponse, + ) + + +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + self.list = to_raw_response_wrapper( + agents.list, + ) + self.move = to_raw_response_wrapper( + agents.move, + ) + + +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + self.list = async_to_raw_response_wrapper( + agents.list, + ) + self.move = async_to_raw_response_wrapper( + agents.move, + ) + + +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + self.list = to_streamed_response_wrapper( + agents.list, + ) + self.move = to_streamed_response_wrapper( + agents.move, + ) + + +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + self.list = async_to_streamed_response_wrapper( + agents.list, + ) + self.move = async_to_streamed_response_wrapper( + agents.move, + ) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py new file mode 100644 index 00000000..0f506118 --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -0,0 +1,654 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
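# Usage sketch for the two workspace-agent endpoints defined above
# (illustrative; assumes a configured client, and the UUIDs are placeholders):
from do_gradientai import GradientAI

client = GradientAI()
listed = client.agents.evaluation_metrics.workspaces.agents.list(
    workspace_uuid="11111111-2222-3333-4444-555555555555",
    only_deployed=True,
    per_page=10,
)
moved = client.agents.evaluation_metrics.workspaces.agents.move(
    path_workspace_uuid="11111111-2222-3333-4444-555555555555",
    agent_uuids=["aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"],
)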
+ +from __future__ import annotations + +from typing import List + +import httpx + +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics import workspace_create_params, workspace_update_params +from .....types.agents.evaluation_metrics.workspace_list_response import WorkspaceListResponse +from .....types.agents.evaluation_metrics.workspace_create_response import WorkspaceCreateResponse +from .....types.agents.evaluation_metrics.workspace_delete_response import WorkspaceDeleteResponse +from .....types.agents.evaluation_metrics.workspace_update_response import WorkspaceUpdateResponse +from .....types.agents.evaluation_metrics.workspace_retrieve_response import WorkspaceRetrieveResponse +from .....types.agents.evaluation_metrics.workspace_list_evaluation_test_cases_response import ( + WorkspaceListEvaluationTestCasesResponse, +) + +__all__ = ["WorkspacesResource", "AsyncWorkspacesResource"] + + +class WorkspacesResource(SyncAPIResource): + @cached_property + def agents(self) -> AgentsResource: + return AgentsResource(self._client) + + @cached_property + def with_raw_response(self) -> WorkspacesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return WorkspacesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return WorkspacesResourceWithStreamingResponse(self) + + def create( + self, + *, + agent_uuids: List[str] | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WorkspaceCreateResponse: + """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`. + + The + response body contains a JSON object with the newly created workspace object. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/workspaces" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/workspaces", + body=maybe_transform( + { + "agent_uuids": agent_uuids, + "description": description, + "name": name, + }, + workspace_create_params.WorkspaceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=WorkspaceCreateResponse, + ) + + def retrieve( + self, + workspace_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WorkspaceRetrieveResponse: + """ + To retrieve details of a workspace, send a GET request to + `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object + containing the workspace. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not workspace_uuid: + raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}") + return self._get( + f"/v2/gen-ai/workspaces/{workspace_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=WorkspaceRetrieveResponse, + ) + + def update( + self, + path_workspace_uuid: str, + *, + description: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + body_workspace_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WorkspaceUpdateResponse: + """ + To update a workspace, send a PUT request to + `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object + containing the workspace. + + Args: + body_workspace_uuid: Workspace UUID.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_workspace_uuid: + raise ValueError( + f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}" + ) + return self._put( + f"/v2/gen-ai/workspaces/{path_workspace_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}", + body=maybe_transform( + { + "description": description, + "name": name, + "body_workspace_uuid": body_workspace_uuid, + }, + workspace_update_params.WorkspaceUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=WorkspaceUpdateResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WorkspaceListResponse: + """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`.""" + return self._get( + "/v2/gen-ai/workspaces" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/workspaces", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=WorkspaceListResponse, + ) + + def delete( + self, + workspace_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> WorkspaceDeleteResponse: + """ + To delete a workspace, send a DELETE request to + `/v2/gen-ai/workspaces/{workspace_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not workspace_uuid: + raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}") + return self._delete( + f"/v2/gen-ai/workspaces/{workspace_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=WorkspaceDeleteResponse, + ) + + def list_evaluation_test_cases( + self, + workspace_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceListEvaluationTestCasesResponse:
+        """
+        To list all evaluation test cases for a workspace, send a GET request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not workspace_uuid:
+            raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+        return self._get(
+            f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceListEvaluationTestCasesResponse,
+        )
+
+
+class AsyncWorkspacesResource(AsyncAPIResource):
+    @cached_property
+    def agents(self) -> AsyncAgentsResource:
+        return AsyncAgentsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncWorkspacesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncWorkspacesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncWorkspacesResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        agent_uuids: List[str] | NotGiven = NOT_GIVEN,
+        description: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceCreateResponse:
+        """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`.
+
+        The
+        response body contains a JSON object with the newly created workspace object.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/v2/gen-ai/workspaces"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+            body=await async_maybe_transform(
+                {
+                    "agent_uuids": agent_uuids,
+                    "description": description,
+                    "name": name,
+                },
+                workspace_create_params.WorkspaceCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        workspace_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceRetrieveResponse:
+        """
+        To retrieve details of a workspace, send a GET request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+        containing the workspace.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not workspace_uuid:
+            raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/workspaces/{workspace_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceRetrieveResponse,
+        )
+
+    async def update(
+        self,
+        path_workspace_uuid: str,
+        *,
+        description: str | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        body_workspace_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceUpdateResponse:
+        """
+        To update a workspace, send a PUT request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+        containing the workspace.
+
+        Args:
+          body_workspace_uuid: Workspace UUID.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not path_workspace_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+            )
+        return await self._put(
+            f"/v2/gen-ai/workspaces/{path_workspace_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}",
+            body=await async_maybe_transform(
+                {
+                    "description": description,
+                    "name": name,
+                    "body_workspace_uuid": body_workspace_uuid,
+                },
+                workspace_update_params.WorkspaceUpdateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceUpdateResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceListResponse:
+        """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`."""
+        return await self._get(
+            "/v2/gen-ai/workspaces"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceListResponse,
+        )
+
+    async def delete(
+        self,
+        workspace_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceDeleteResponse:
+        """
+        To delete a workspace, send a DELETE request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not workspace_uuid:
+            raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+        return await self._delete(
+            f"/v2/gen-ai/workspaces/{workspace_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceDeleteResponse,
+        )
+
+    async def list_evaluation_test_cases(
+        self,
+        workspace_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> WorkspaceListEvaluationTestCasesResponse:
+        """
+        To list all evaluation test cases for a workspace, send a GET request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not workspace_uuid:
+            raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+        return await self._get(
+            f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=WorkspaceListEvaluationTestCasesResponse,
+        )
+
+
+class WorkspacesResourceWithRawResponse:
+    def __init__(self, workspaces: WorkspacesResource) -> None:
+        self._workspaces = workspaces
+
+        self.create = to_raw_response_wrapper(
+            workspaces.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            workspaces.retrieve,
+        )
+        self.update = to_raw_response_wrapper(
+            workspaces.update,
+        )
+        self.list = to_raw_response_wrapper(
+            workspaces.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            workspaces.delete,
+        )
+        self.list_evaluation_test_cases = to_raw_response_wrapper(
+            workspaces.list_evaluation_test_cases,
+        )
+
+    @cached_property
+    def agents(self) -> AgentsResourceWithRawResponse:
+        return AgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithRawResponse:
+    def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+        self._workspaces = workspaces
+
+        self.create = async_to_raw_response_wrapper(
+            workspaces.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            workspaces.retrieve,
+        )
+        self.update = async_to_raw_response_wrapper(
+            workspaces.update,
+        )
+        self.list = async_to_raw_response_wrapper(
+            workspaces.list,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            workspaces.delete,
+        )
+        self.list_evaluation_test_cases = async_to_raw_response_wrapper(
+            workspaces.list_evaluation_test_cases,
+        )
+
+    @cached_property
+    def agents(self) -> AsyncAgentsResourceWithRawResponse:
+        return AsyncAgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class WorkspacesResourceWithStreamingResponse:
+    def __init__(self, workspaces: WorkspacesResource) -> None:
+        self._workspaces = workspaces
+
+        self.create = to_streamed_response_wrapper(
+            workspaces.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            workspaces.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            workspaces.update,
+        )
+        self.list = to_streamed_response_wrapper(
+            workspaces.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            workspaces.delete,
+        )
+        self.list_evaluation_test_cases = to_streamed_response_wrapper(
+            workspaces.list_evaluation_test_cases,
+        )
+
+    @cached_property
+    def agents(self) -> AgentsResourceWithStreamingResponse:
+        return AgentsResourceWithStreamingResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithStreamingResponse:
+    def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+ self._workspaces = workspaces + + self.create = async_to_streamed_response_wrapper( + workspaces.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + workspaces.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + workspaces.update, + ) + self.list = async_to_streamed_response_wrapper( + workspaces.list, + ) + self.delete = async_to_streamed_response_wrapper( + workspaces.delete, + ) + self.list_evaluation_test_cases = async_to_streamed_response_wrapper( + workspaces.list_evaluation_test_cases, + ) + + @cached_property + def agents(self) -> AsyncAgentsResourceWithStreamingResponse: + return AsyncAgentsResourceWithStreamingResponse(self._workspaces.agents) diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py index 7e207e7d..47045132 100644 --- a/src/do_gradientai/resources/agents/evaluation_runs.py +++ b/src/do_gradientai/resources/agents/evaluation_runs.py @@ -2,6 +2,8 @@ from __future__ import annotations +from typing import List + import httpx from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven @@ -19,6 +21,7 @@ from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse +from ...types.agents.evaluation_run_retrieve_results_response import EvaluationRunRetrieveResultsResponse __all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"] @@ -46,7 +49,7 @@ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse def create( self, *, - agent_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: List[str] | NotGiven = NOT_GIVEN, run_name: str | NotGiven = NOT_GIVEN, test_case_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -61,7 +64,7 @@ def create( `/v2/gen-ai/evaluation_runs`. Args: - agent_uuid: Agent UUID to run the test case against. + agent_uuids: Agent UUIDs to run the test case against. run_name: The name of the run. @@ -79,7 +82,7 @@ def create( else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs", body=maybe_transform( { - "agent_uuid": agent_uuid, + "agent_uuids": agent_uuids, "run_name": run_name, "test_case_uuid": test_case_uuid, }, @@ -167,6 +170,45 @@ def list_results( cast_to=EvaluationRunListResultsResponse, ) + def retrieve_results( + self, + prompt_id: int, + *, + evaluation_run_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunRetrieveResultsResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunRetrieveResultsResponse, + ) + class AsyncEvaluationRunsResource(AsyncAPIResource): @cached_property @@ -191,7 +233,7 @@ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingRes async def create( self, *, - agent_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: List[str] | NotGiven = NOT_GIVEN, run_name: str | NotGiven = NOT_GIVEN, test_case_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -206,7 +248,7 @@ async def create( `/v2/gen-ai/evaluation_runs`. Args: - agent_uuid: Agent UUID to run the test case against. + agent_uuids: Agent UUIDs to run the test case against. run_name: The name of the run. @@ -224,7 +266,7 @@ async def create( else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs", body=await async_maybe_transform( { - "agent_uuid": agent_uuid, + "agent_uuids": agent_uuids, "run_name": run_name, "test_case_uuid": test_case_uuid, }, @@ -312,6 +354,45 @@ async def list_results( cast_to=EvaluationRunListResultsResponse, ) + async def retrieve_results( + self, + prompt_id: int, + *, + evaluation_run_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunRetrieveResultsResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunRetrieveResultsResponse, + ) + class EvaluationRunsResourceWithRawResponse: def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: @@ -326,6 +407,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: self.list_results = to_raw_response_wrapper( evaluation_runs.list_results, ) + self.retrieve_results = to_raw_response_wrapper( + evaluation_runs.retrieve_results, + ) class AsyncEvaluationRunsResourceWithRawResponse: @@ -341,6 +425,9 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: self.list_results = async_to_raw_response_wrapper( evaluation_runs.list_results, ) + self.retrieve_results = async_to_raw_response_wrapper( + evaluation_runs.retrieve_results, + ) class EvaluationRunsResourceWithStreamingResponse: @@ -356,6 +443,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: self.list_results = to_streamed_response_wrapper( evaluation_runs.list_results, ) + self.retrieve_results = to_streamed_response_wrapper( + evaluation_runs.retrieve_results, + ) class AsyncEvaluationRunsResourceWithStreamingResponse: @@ -371,3 +461,6 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: self.list_results = async_to_streamed_response_wrapper( evaluation_runs.list_results, ) + self.retrieve_results = async_to_streamed_response_wrapper( + evaluation_runs.retrieve_results, + ) diff --git a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py index 995df025..beff8752 100644 --- a/src/do_gradientai/resources/agents/evaluation_test_cases.py +++ b/src/do_gradientai/resources/agents/evaluation_test_cases.py @@ -20,6 +20,7 @@ from ...types.agents import ( evaluation_test_case_create_params, evaluation_test_case_update_params, + evaluation_test_case_retrieve_params, evaluation_test_case_list_evaluation_runs_params, ) from ...types.agents.api_star_metric_param import APIStarMetricParam @@ -118,6 +119,7 @@ def retrieve( self, test_case_uuid: str, *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -130,6 +132,8 @@ def retrieve( request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`. Args: + evaluation_test_case_version: Version of the test case. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -145,7 +149,14 @@ def retrieve( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + {"evaluation_test_case_version": evaluation_test_case_version}, + evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams, + ), ), cast_to=EvaluationTestCaseRetrieveResponse, ) @@ -368,6 +379,7 @@ async def retrieve( self, test_case_uuid: str, *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -380,6 +392,8 @@ async def retrieve( request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`. Args: + evaluation_test_case_version: Version of the test case. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -395,7 +409,14 @@ async def retrieve( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"evaluation_test_case_version": evaluation_test_case_version}, + evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams, + ), ), cast_to=EvaluationTestCaseRetrieveResponse, ) diff --git a/src/do_gradientai/resources/models.py b/src/do_gradientai/resources/models.py index c8e78b9b..da5462ae 100644 --- a/src/do_gradientai/resources/models.py +++ b/src/do_gradientai/resources/models.py @@ -2,14 +2,9 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal - import httpx -from ..types import model_list_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( @@ -18,6 +13,7 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from ..types.model import Model from .._base_client import make_request_options from ..types.model_list_response import ModelListResponse @@ -44,52 +40,22 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def list( + def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> Model: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -98,24 +64,36 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Model, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. 
+ """ + return self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -141,52 +119,22 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def list( + async def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> Model: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -195,24 +143,36 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Model, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. 
+ """ + return await self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -222,6 +182,9 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) self.list = to_raw_response_wrapper( models.list, ) @@ -231,6 +194,9 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -240,6 +206,9 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) self.list = to_streamed_response_wrapper( models.list, ) @@ -249,6 +218,9 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_streamed_response_wrapper( + models.retrieve, + ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index 23cf1802..626c3840 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -2,15 +2,14 @@ from __future__ import annotations +from .model import Model as Model from .shared import APIMeta as APIMeta, APILinks as APILinks from .api_agent import APIAgent as APIAgent -from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion -from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py index 0060e12c..9c6508f6 100644 --- a/src/do_gradientai/types/agents/__init__.py +++ b/src/do_gradientai/types/agents/__init__.py @@ -45,10 +45,14 @@ from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams from .evaluation_run_list_results_response import EvaluationRunListResultsResponse as EvaluationRunListResultsResponse from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse +from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse from .evaluation_test_case_retrieve_response import 
( EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, ) +from .evaluation_run_retrieve_results_response import ( + EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse, +) from .evaluation_test_case_list_evaluation_runs_params import ( EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams, ) diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py index ae046d3e..b879f756 100644 --- a/src/do_gradientai/types/agents/api_evaluation_run.py +++ b/src/do_gradientai/types/agents/api_evaluation_run.py @@ -11,11 +11,23 @@ class APIEvaluationRun(BaseModel): + agent_deleted: Optional[bool] = None + + agent_name: Optional[str] = None + agent_uuid: Optional[str] = None """Agent UUID.""" agent_version_hash: Optional[str] = None + agent_workspace_uuid: Optional[str] = None + + created_by_user_email: Optional[str] = None + + created_by_user_id: Optional[str] = None + + error_description: Optional[str] = None + evaluation_run_uuid: Optional[str] = None """Evaluation run UUID.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py new file mode 100644 index 00000000..7af9b074 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams +from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse +from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams +from .workspace_create_response import WorkspaceCreateResponse as WorkspaceCreateResponse +from .workspace_delete_response import WorkspaceDeleteResponse as WorkspaceDeleteResponse +from .workspace_update_response import WorkspaceUpdateResponse as WorkspaceUpdateResponse +from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse +from .workspace_list_evaluation_test_cases_response import ( + WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse, +) diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py new file mode 100644 index 00000000..73f390be --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["WorkspaceCreateParams"] + + +class WorkspaceCreateParams(TypedDict, total=False): + agent_uuids: List[str] + + description: str + + name: str diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py new file mode 100644 index 00000000..419ec288 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
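+#
+# A minimal usage sketch (illustrative only, not generated code): assuming a
+# configured client, `workspaces.create` returns this model. The workspace name
+# and description below are placeholders.
+#
+#     from do_gradientai import GradientAI
+#
+#     client = GradientAI()  # assumes credentials are picked up from the environment
+#     resp = client.agents.evaluation_metrics.workspaces.create(
+#         name="my-workspace",
+#         description="workspace for evaluation runs",
+#     )
+#     print(resp.workspace)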
+ +from __future__ import annotations + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["WorkspaceCreateResponse"] + + +class WorkspaceCreateResponse(BaseModel): + workspace: Optional["APIWorkspace"] = None + + +from ...api_workspace import APIWorkspace diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py new file mode 100644 index 00000000..1fe7b5a2 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["WorkspaceDeleteResponse"] + + +class WorkspaceDeleteResponse(BaseModel): + workspace_uuid: Optional[str] = None diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py new file mode 100644 index 00000000..32c613f8 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ..api_evaluation_test_case import APIEvaluationTestCase + +__all__ = ["WorkspaceListEvaluationTestCasesResponse"] + + +class WorkspaceListEvaluationTestCasesResponse(BaseModel): + evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py new file mode 100644 index 00000000..64f9a63c --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ...._models import BaseModel + +__all__ = ["WorkspaceListResponse"] + + +class WorkspaceListResponse(BaseModel): + workspaces: Optional[List["APIWorkspace"]] = None + + +from ...api_workspace import APIWorkspace diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py new file mode 100644 index 00000000..fa4a567c --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["WorkspaceRetrieveResponse"] + + +class WorkspaceRetrieveResponse(BaseModel): + workspace: Optional["APIWorkspace"] = None + + +from ...api_workspace import APIWorkspace diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py new file mode 100644 index 00000000..fd09079e --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["WorkspaceUpdateParams"] + + +class WorkspaceUpdateParams(TypedDict, total=False): + description: str + + name: str + + body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] + """Workspace UUID.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py new file mode 100644 index 00000000..77dac88c --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["WorkspaceUpdateResponse"] + + +class WorkspaceUpdateResponse(BaseModel): + workspace: Optional["APIWorkspace"] = None + + +from ...api_workspace import APIWorkspace diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py new file mode 100644 index 00000000..9f369c7c --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .agent_list_params import AgentListParams as AgentListParams +from .agent_move_params import AgentMoveParams as AgentMoveParams +from .agent_list_response import AgentListResponse as AgentListResponse +from .agent_move_response import AgentMoveResponse as AgentMoveResponse diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py new file mode 100644 index 00000000..277274ed --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["AgentListParams", "FieldMask"] + + +class AgentListParams(TypedDict, total=False): + field_mask: FieldMask + + only_deployed: bool + """Only list agents that are deployed.""" + + page: int + """page number.""" + + per_page: int + """items per page.""" + + +class FieldMask(TypedDict, total=False): + paths: List[str] + """The set of field mask paths.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py new file mode 100644 index 00000000..1e520736 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
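+#
+# A minimal usage sketch (illustrative only; the exact method signature is an
+# assumption based on the params above): listing the agents in a workspace with
+# pagination, then reading this response model. The workspace UUID is a placeholder.
+#
+#     page = client.agents.evaluation_metrics.workspaces.agents.list(
+#         workspace_uuid="00000000-0000-0000-0000-000000000000",
+#         only_deployed=True,
+#         page=1,
+#         per_page=20,
+#     )
+#     for agent in page.agents or []:
+#         print(agent)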
+ +from __future__ import annotations + +from typing import List, Optional + +from ....._models import BaseModel +from ....shared.api_meta import APIMeta +from ....shared.api_links import APILinks + +__all__ = ["AgentListResponse"] + + +class AgentListResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ....api_agent import APIAgent diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py new file mode 100644 index 00000000..8e92503a --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["AgentMoveParams"] + + +class AgentMoveParams(TypedDict, total=False): + agent_uuids: List[str] + + body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py new file mode 100644 index 00000000..d2d084d5 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ....._models import BaseModel + +__all__ = ["AgentMoveResponse"] + + +class AgentMoveResponse(BaseModel): + workspace: Optional["APIWorkspace"] = None + + +from ....api_workspace import APIWorkspace diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py index 1ae2dbbb..47bdabd6 100644 --- a/src/do_gradientai/types/agents/evaluation_run_create_params.py +++ b/src/do_gradientai/types/agents/evaluation_run_create_params.py @@ -2,14 +2,15 @@ from __future__ import annotations +from typing import List from typing_extensions import TypedDict __all__ = ["EvaluationRunCreateParams"] class EvaluationRunCreateParams(TypedDict, total=False): - agent_uuid: str - """Agent UUID to run the test case against.""" + agent_uuids: List[str] + """Agent UUIDs to run the test case against.""" run_name: str """The name of the run.""" diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py index 36942c29..90da2e61 100644 --- a/src/do_gradientai/types/agents/evaluation_run_create_response.py +++ b/src/do_gradientai/types/agents/evaluation_run_create_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Optional +from typing import List, Optional from ..._models import BaseModel @@ -8,4 +8,4 @@ class EvaluationRunCreateResponse(BaseModel): - evaluation_run_uuid: Optional[str] = None + evaluation_run_uuids: Optional[List[str]] = None diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py new file mode 100644 index 00000000..4bb70732 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_evaluation_prompt import APIEvaluationPrompt + +__all__ = ["EvaluationRunRetrieveResultsResponse"] + + +class EvaluationRunRetrieveResultsResponse(BaseModel): + prompt: Optional[APIEvaluationPrompt] = None diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py new file mode 100644 index 00000000..f84fe876 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationTestCaseRetrieveParams"] + + +class EvaluationTestCaseRetrieveParams(TypedDict, total=False): + evaluation_test_case_version: int + """Version of the test case.""" diff --git a/src/do_gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py deleted file mode 100644 index c2bc1edd..00000000 --- a/src/do_gradientai/types/api_model.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from .._models import BaseModel -from .api_agreement import APIAgreement -from .api_model_version import APIModelVersion - -__all__ = ["APIModel"] - - -class APIModel(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py index 9fc915e5..b23053f2 100644 --- a/src/do_gradientai/types/knowledge_bases/__init__.py +++ b/src/do_gradientai/types/knowledge_bases/__init__.py @@ -5,6 +5,7 @@ from .api_indexing_job import APIIndexingJob as APIIndexingJob from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource +from .api_indexed_data_source import APIIndexedDataSource as APIIndexedDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py new file mode 100644 index 00000000..2449e9fd --- /dev/null +++ b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
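+#
+# A minimal usage sketch (illustrative only; the client call and the UUID are
+# assumptions): this model carries per-data-source indexing progress, e.g. when
+# fetching the data sources of an indexing job.
+#
+#     resp = client.knowledge_bases.indexing_jobs.retrieve_data_sources("job-uuid")
+#     for ds in resp.indexed_data_sources or []:
+#         if ds.status == "DATA_SOURCE_STATUS_FAILED":
+#             print(ds.data_source_uuid, ds.error_msg)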
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["APIIndexedDataSource"] + + +class APIIndexedDataSource(BaseModel): + completed_at: Optional[datetime] = None + + data_source_uuid: Optional[str] = None + + error_details: Optional[str] = None + + error_msg: Optional[str] = None + + failed_item_count: Optional[str] = None + + indexed_file_count: Optional[str] = None + + indexed_item_count: Optional[str] = None + + removed_item_count: Optional[str] = None + + skipped_item_count: Optional[str] = None + + started_at: Optional[datetime] = None + + status: Optional[ + Literal[ + "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", + "DATA_SOURCE_STATUS_FAILED", + ] + ] = None + + total_bytes: Optional[str] = None + + total_bytes_indexed: Optional[str] = None + + total_file_count: Optional[str] = None diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py index 2809141c..573a7c4e 100644 --- a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py +++ b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py @@ -34,6 +34,18 @@ class APIIndexingJob(BaseModel): started_at: Optional[datetime] = None + status: Optional[ + Literal[ + "INDEX_JOB_STATUS_UNKNOWN", + "INDEX_JOB_STATUS_PARTIAL", + "INDEX_JOB_STATUS_IN_PROGRESS", + "INDEX_JOB_STATUS_COMPLETED", + "INDEX_JOB_STATUS_FAILED", + "INDEX_JOB_STATUS_NO_CHANGES", + "INDEX_JOB_STATUS_PENDING", + ] + ] = None + tokens: Optional[int] = None total_datasources: Optional[int] = None diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index ca24d6f0..202e4202 100644 --- a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -6,6 +6,7 @@ from ..._models import BaseModel from .api_indexing_job import APIIndexingJob from .api_spaces_data_source import APISpacesDataSource +from .api_indexed_data_source import APIIndexedDataSource from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource @@ -32,6 +33,8 @@ class APIKnowledgeBaseDataSource(BaseModel): item_path: Optional[str] = None + last_datasource_indexing_job: Optional[APIIndexedDataSource] = None + last_indexing_job: Optional[APIIndexingJob] = None region: Optional[str] = None diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py index a9d0c2c0..dd0e317e 100644 --- a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py +++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py @@ -1,52 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal from ..._models import BaseModel +from .api_indexed_data_source import APIIndexedDataSource -__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] - - -class IndexedDataSource(BaseModel): - completed_at: Optional[datetime] = None - - data_source_uuid: Optional[str] = None - - error_details: Optional[str] = None - - error_msg: Optional[str] = None - - failed_item_count: Optional[str] = None - - indexed_file_count: Optional[str] = None - - indexed_item_count: Optional[str] = None - - removed_item_count: Optional[str] = None - - skipped_item_count: Optional[str] = None - - started_at: Optional[datetime] = None - - status: Optional[ - Literal[ - "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", - "DATA_SOURCE_STATUS_FAILED", - ] - ] = None - - total_bytes: Optional[str] = None - - total_bytes_indexed: Optional[str] = None - - total_file_count: Optional[str] = None +__all__ = ["IndexingJobRetrieveDataSourcesResponse"] class IndexingJobRetrieveDataSourcesResponse(BaseModel): - indexed_data_sources: Optional[List[IndexedDataSource]] = None + indexed_data_sources: Optional[List[APIIndexedDataSource]] = None diff --git a/src/do_gradientai/types/model.py b/src/do_gradientai/types/model.py new file mode 100644 index 00000000..2631ee8d --- /dev/null +++ b/src/do_gradientai/types/model.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["Model"] + + +class Model(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/do_gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py deleted file mode 100644 index 4abc1dc1..00000000 --- a/src/do_gradientai/types/model_list_params.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, TypedDict - -__all__ = ["ModelListParams"] - - -class ModelListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" - - public_only: bool - """only include models that are publicly available.""" - - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - """include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - """ diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py index 47651759..8f835449 100644 --- a/src/do_gradientai/types/model_list_response.py +++ b/src/do_gradientai/types/model_list_response.py @@ -1,18 +1,15 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List +from typing_extensions import Literal +from .model import Model from .._models import BaseModel -from .api_model import APIModel -from .shared.api_meta import APIMeta -from .shared.api_links import APILinks __all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - links: Optional[APILinks] = None + data: List[Model] - meta: Optional[APIMeta] = None - - models: Optional[List[APIModel]] = None + object: Literal["list"] diff --git a/tests/api_resources/agents/evaluation_metrics/__init__.py b/tests/api_resources/agents/evaluation_metrics/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py new file mode 100644 index 00000000..42bfa79f --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -0,0 +1,521 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics import ( + WorkspaceListResponse, + WorkspaceCreateResponse, + WorkspaceDeleteResponse, + WorkspaceUpdateResponse, + WorkspaceRetrieveResponse, + WorkspaceListEvaluationTestCasesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestWorkspaces: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.create() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.create( + agent_uuids=["string"], + description="description", + name="name", + ) + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.retrieve( + "workspace_uuid", + ) + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + 
client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.update( + path_workspace_uuid="workspace_uuid", + ) + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.update( + path_workspace_uuid="workspace_uuid", + description="description", + name="name", + body_workspace_uuid="workspace_uuid", + ) + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.with_raw_response.update( + path_workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.update( + path_workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): + client.agents.evaluation_metrics.workspaces.with_raw_response.update( + path_workspace_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.list() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.delete( + "workspace_uuid", + ) + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = 
client.agents.evaluation_metrics.workspaces.with_raw_response.delete( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + client.agents.evaluation_metrics.workspaces.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_test_cases(self, client: GradientAI) -> None: + workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases( + "workspace_uuid", + ) + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_evaluation_test_cases(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = response.parse() + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_evaluation_test_cases(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = response.parse() + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_evaluation_test_cases(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( + "", + ) + + +class TestAsyncWorkspaces: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.create() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.create( + agent_uuids=["string"], + description="description", + name="name", + ) + 
assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve( + "workspace_uuid", + ) + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.update( + path_workspace_uuid="workspace_uuid", + ) + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.update( + path_workspace_uuid="workspace_uuid", + description="description", + name="name", + body_workspace_uuid="workspace_uuid", + ) + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( + path_workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update( + path_workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( + path_workspace_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.list() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.delete( + "workspace_uuid", + ) + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases( + "workspace_uuid", + ) + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( + "workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + workspace = await response.parse() + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases( + "workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + workspace = await response.parse() + assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( + "", + ) diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py b/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py new file mode 100644 index 00000000..e772d668 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -0,0 +1,239 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics.workspaces import ( + AgentListResponse, + AgentMoveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + agent = client.agents.evaluation_metrics.workspaces.agents.list( + workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.evaluation_metrics.workspaces.agents.list( + workspace_uuid="workspace_uuid", + field_mask={"paths": ["string"]}, + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( + workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( + workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( + workspace_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_move(self, client: GradientAI) -> None: + agent = client.agents.evaluation_metrics.workspaces.agents.move( + path_workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_move_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.evaluation_metrics.workspaces.agents.move( + path_workspace_uuid="workspace_uuid", + agent_uuids=["string"], + body_workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_move(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( + path_workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_move(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( + path_workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_move(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): + client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( + path_workspace_uuid="", + ) + + +class TestAsyncAgents: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( + workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( + workspace_uuid="workspace_uuid", + field_mask={"paths": ["string"]}, + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( + workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( + workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( + workspace_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_move(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( + path_workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( + 
path_workspace_uuid="workspace_uuid", + agent_uuids=["string"], + body_workspace_uuid="workspace_uuid", + ) + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( + path_workspace_uuid="workspace_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( + path_workspace_uuid="workspace_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentMoveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_move(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): + await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( + path_workspace_uuid="", + ) diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 721be2a0..6bd3cfa5 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -13,6 +13,7 @@ EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, + EvaluationRunRetrieveResultsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -31,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.create( - agent_uuid="agent_uuid", + agent_uuids=["string"], run_name="run_name", test_case_uuid="test_case_uuid", ) @@ -143,6 +144,52 @@ def test_path_params_list_results(self, client: GradientAI) -> None: "", ) + @pytest.mark.skip() + @parametrize + def test_method_retrieve_results(self, client: GradientAI) -> None: + evaluation_run = client.agents.evaluation_runs.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_results(self, client: GradientAI) -> None: + response = client.agents.evaluation_runs.with_raw_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_results(self, client: GradientAI) -> None: + with client.agents.evaluation_runs.with_streaming_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) 
as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_results(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.agents.evaluation_runs.with_raw_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="", + ) + class TestAsyncEvaluationRuns: parametrize = pytest.mark.parametrize( @@ -159,7 +206,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.create( - agent_uuid="agent_uuid", + agent_uuids=["string"], run_name="run_name", test_case_uuid="test_case_uuid", ) @@ -270,3 +317,49 @@ async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> await async_client.agents.evaluation_runs.with_raw_response.list_results( "", ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.agents.evaluation_runs.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( + prompt_id=0, + evaluation_run_uuid="", + ) diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index 50b285bd..87f66b24 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -72,7 +72,16 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def 
test_method_retrieve(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.agents.evaluation_test_cases.retrieve( + test_case_uuid="test_case_uuid", + evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -80,7 +89,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -92,7 +101,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -107,7 +116,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): client.agents.evaluation_test_cases.with_raw_response.retrieve( - "", + test_case_uuid="", ) @pytest.mark.skip() @@ -306,7 +315,16 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( + test_case_uuid="test_case_uuid", + evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -314,7 +332,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -326,7 +344,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( - "test_case_uuid", + test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -341,7 +359,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) async def test_path_params_retrieve(self, 
async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( - "", + test_case_uuid="", ) @pytest.mark.skip() diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index f7e21015..e1f3457b 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from tests.utils import assert_matches_type from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ModelListResponse +from do_gradientai.types import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,19 +19,50 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + def test_method_retrieve(self, client: GradientAI) -> None: + model = client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - model = client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(Model, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @@ -64,19 +95,50 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + 
assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(Model, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(Model, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() From f69550b679e256018b3876cd8e27936e8a840685 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 02:40:08 +0000 Subject: [PATCH 079/200] fix(ci): release-doctor — report correct token name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bin/check-release-environment | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/check-release-environment b/bin/check-release-environment index b1bd8969..b845b0f4 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -3,7 +3,7 @@ errors=() if [ -z "${PYPI_TOKEN}" ]; then - errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") + errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi lenErrors=${#errors[@]} From fbca0fef4c63d6175c078091e2eecdf5e5e9aa30 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 21:12:47 +0000 Subject: [PATCH 080/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b56c3d0b..e8285b71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.4" + ".": "0.1.0-alpha.5" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7a5cd80b..5e38e921 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.4" +version = "0.1.0-alpha.5" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index 83bf8865..12e8e17e 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.4" # x-release-please-version +__version__ = "0.1.0-alpha.5" # x-release-please-version From ca6f08063e1e5802c819e080d2223ac6c58377a3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 21:18:07 +0000 Subject: [PATCH 081/200] feat(api): manual updates --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 44 ++-- api.md | 198 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{do_gradientai => gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{do_gradientai => gradientai}/_client.py | 0 src/{do_gradientai => gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{do_gradientai => gradientai}/_files.py | 0 src/{do_gradientai => gradientai}/_models.py | 0 src/{do_gradientai => gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{do_gradientai => gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{do_gradientai => gradientai}/_version.py | 2 +- src/{do_gradientai => gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 .../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/model_providers/__init__.py | 0 .../model_providers/anthropic/__init__.py | 0 .../model_providers/anthropic/anthropic.py | 0 .../model_providers/anthropic/keys.py | 0 .../model_providers/model_providers.py | 0 .../model_providers/openai/__init__.py | 0 .../resources/model_providers/openai/keys.py | 0 .../model_providers/openai/openai.py | 0 .../resources/models.py | 0 .../resources/regions.py | 0 .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 
.../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 .../types/agents/route_view_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../chat/chat_completion_token_logprob.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py 
| 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model.py | 0 .../types/model_list_response.py | 0 .../types/model_providers/__init__.py | 0 .../model_providers/anthropic/__init__.py | 0 .../anthropic/key_create_params.py | 0 .../anthropic/key_create_response.py | 0 .../anthropic/key_delete_response.py | 0 .../anthropic/key_list_agents_params.py | 0 .../anthropic/key_list_agents_response.py | 0 .../anthropic/key_list_params.py | 0 .../anthropic/key_list_response.py | 0 .../anthropic/key_retrieve_response.py | 0 .../anthropic/key_update_params.py | 0 .../anthropic/key_update_response.py | 0 .../types/model_providers/openai/__init__.py | 0 .../openai/key_create_params.py | 0 .../openai/key_create_response.py | 0 .../openai/key_delete_response.py | 0 .../model_providers/openai/key_list_params.py | 0 .../openai/key_list_response.py | 0 .../openai/key_retrieve_agents_params.py | 0 .../openai/key_retrieve_agents_response.py | 0 .../openai/key_retrieve_response.py | 0 .../openai/key_update_params.py | 0 .../openai/key_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- .../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- 
 .../knowledge_bases/test_indexing_jobs.py | 4 +-
 .../model_providers/anthropic/test_keys.py | 4 +-
 .../model_providers/openai/test_keys.py | 4 +-
 tests/api_resources/test_agents.py | 4 +-
 tests/api_resources/test_knowledge_bases.py | 4 +-
 tests/api_resources/test_models.py | 4 +-
 tests/api_resources/test_regions.py | 4 +-
 tests/conftest.py | 6 +-
 tests/test_client.py | 46 ++--
 tests/test_deepcopy.py | 2 +-
 tests/test_extract_files.py | 4 +-
 tests/test_files.py | 2 +-
 tests/test_models.py | 6 +-
 tests/test_qs.py | 2 +-
 tests/test_required_args.py | 2 +-
 tests/test_response.py | 14 +-
 tests/test_streaming.py | 4 +-
 tests/test_transform.py | 8 +-
 tests/test_utils/test_proxy.py | 2 +-
 tests/test_utils/test_typing.py | 2 +-
 tests/utils.py | 8 +-
 268 files changed, 241 insertions(+), 241 deletions(-)
 rename src/{do_gradientai => gradientai}/__init__.py (95%)
 rename src/{do_gradientai => gradientai}/_base_client.py (99%)
 rename src/{do_gradientai => gradientai}/_client.py (100%)
 rename src/{do_gradientai => gradientai}/_compat.py (100%)
 rename src/{do_gradientai => gradientai}/_constants.py (100%)
 rename src/{do_gradientai => gradientai}/_exceptions.py (100%)
 rename src/{do_gradientai => gradientai}/_files.py (100%)
 rename src/{do_gradientai => gradientai}/_models.py (100%)
 rename src/{do_gradientai => gradientai}/_qs.py (100%)
 rename src/{do_gradientai => gradientai}/_resource.py (100%)
 rename src/{do_gradientai => gradientai}/_response.py (99%)
 rename src/{do_gradientai => gradientai}/_streaming.py (100%)
 rename src/{do_gradientai => gradientai}/_types.py (99%)
 rename src/{do_gradientai => gradientai}/_utils/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_logs.py (75%)
 rename src/{do_gradientai => gradientai}/_utils/_proxy.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_reflection.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_resources_proxy.py (50%)
 rename src/{do_gradientai => gradientai}/_utils/_streams.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_sync.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_transform.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_typing.py (100%)
 rename src/{do_gradientai => gradientai}/_utils/_utils.py (100%)
 rename src/{do_gradientai => gradientai}/_version.py (83%)
 rename src/{do_gradientai => gradientai}/py.typed (100%)
 rename src/{do_gradientai => gradientai}/resources/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/agents.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/api_keys.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_datasets.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_runs.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/evaluation_test_cases.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/functions.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/knowledge_bases.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/routes.py (100%)
 rename src/{do_gradientai => gradientai}/resources/agents/versions.py (100%)
 rename src/{do_gradientai => gradientai}/resources/chat/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/chat/chat.py (100%)
 rename src/{do_gradientai => gradientai}/resources/chat/completions.py (100%)
 rename src/{do_gradientai => gradientai}/resources/inference/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/inference/api_keys.py (100%)
 rename src/{do_gradientai => gradientai}/resources/inference/inference.py (100%)
 rename src/{do_gradientai => gradientai}/resources/knowledge_bases/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/knowledge_bases/data_sources.py (100%)
 rename src/{do_gradientai => gradientai}/resources/knowledge_bases/indexing_jobs.py (100%)
 rename src/{do_gradientai => gradientai}/resources/knowledge_bases/knowledge_bases.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/anthropic/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/anthropic/anthropic.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/anthropic/keys.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/model_providers.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/openai/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/openai/keys.py (100%)
 rename src/{do_gradientai => gradientai}/resources/model_providers/openai/openai.py (100%)
 rename src/{do_gradientai => gradientai}/resources/models.py (100%)
 rename src/{do_gradientai => gradientai}/resources/regions.py (100%)
 rename src/{do_gradientai => gradientai}/types/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_status_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_status_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric_result.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_prompt.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_run.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_test_case.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_regenerate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_link_knowledge_base_output.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_star_metric.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_star_metric_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_list_results_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/knowledge_base_detach_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_add_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_view_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent_model.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agreement.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_anthropic_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_deployment_visibility.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_knowledge_base.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_model_version.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_openai_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_retrieval_method.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_workspace.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/chat_completion_token_logprob.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/completion_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/completion_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_regenerate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_model_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexing_job.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/aws_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_list_agents_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_list_agents_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/anthropic/key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_retrieve_agents_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_retrieve_agents_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_providers/openai/key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/region_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/region_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/api_links.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/api_meta.py (100%)
diff --git a/.stats.yml b/.stats.yml
index 291ef184..a1e73eb0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 77
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
 openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: ecf128ea21a8fead9dabb9609c4dbce8
+config_hash: 9c2519464cf5de240e34bd89b9f65706
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4f59c83a..086907ef 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock
 
 Most of the SDK is generated code. Modifications to code will be persisted between
 generations, but may result in merge conflicts between manual patches and changes
 from the generator. The generator will never
-modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/gradientai/lib/` and `examples/` directories.
 
 ## Adding and running examples
diff --git a/README.md b/README.md
index 54395c64..b8b2499d 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ The full API of this library can be found in [api.md](api.md).
 
 ```python
 import os
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 client = GradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -55,7 +55,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
 ```python
 import os
 import asyncio
-from do_gradientai import AsyncGradientAI
+from gradientai import AsyncGradientAI
 
 client = AsyncGradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -96,8 +96,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH
 ```python
 import os
 import asyncio
-from do_gradientai import DefaultAioHttpClient
-from do_gradientai import AsyncGradientAI
+from gradientai import DefaultAioHttpClient
+from gradientai import AsyncGradientAI
 
 
 async def main() -> None:
@@ -134,7 +134,7 @@ Typed requests and responses provide autocomplete and documentation within your
 Nested parameters are dictionaries, typed using `TypedDict`, for example:
 
 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 client = GradientAI()
 
@@ -153,16 +153,16 @@ print(completion.stream_options)
 
 ## Handling errors
 
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.
 
 When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
 
-All errors inherit from `do_gradientai.APIError`.
+All errors inherit from `gradientai.APIError`.
 
 ```python
-import do_gradientai
-from do_gradientai import GradientAI
+import gradientai
+from gradientai import GradientAI
 
 client = GradientAI()
 
@@ -170,12 +170,12 @@ try:
     client.agents.versions.list(
         uuid="REPLACE_ME",
     )
-except do_gradientai.APIConnectionError as e:
+except gradientai.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except do_gradientai.RateLimitError as e:
+except gradientai.RateLimitError as e:
     print("A 429 status code was received; we should back off a bit.")
-except do_gradientai.APIStatusError as e:
+except gradientai.APIStatusError as e:
     print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
@@ -203,7 +203,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
 You can use the `max_retries` option to configure or disable retry settings:
 
 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 # Configure the default for all requests:
 client = GradientAI(
@@ -223,7 +223,7 @@ By default requests time out after 1 minute. You can configure this with a `time
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
 
 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 # Configure the default for all requests:
 client = GradientAI(
@@ -277,7 +277,7 @@ if response.my_field is None:
 The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
 
 ```py
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 client = GradientAI()
 response = client.agents.versions.with_raw_response.list(
@@ -289,9 +289,9 @@ version = response.parse()  # get the object that `agents.versions.list()` would
 print(version.agent_versions)
 ```
 
-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
 
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
 
 #### `.with_streaming_response`
 
@@ -355,7 +355,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
 ```python
 import httpx
-from do_gradientai import GradientAI, DefaultHttpxClient
+from gradientai import GradientAI, DefaultHttpxClient
 
 client = GradientAI(
     # Or use the `GRADIENT_AI_BASE_URL` env var
@@ -378,7 +378,7 @@ client.with_options(http_client=DefaultHttpxClient(...))
 
 By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
 
 ```py
-from do_gradientai import GradientAI
+from gradientai import GradientAI
 
 with GradientAI() as client:
     # make requests here
@@ -406,8 +406,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
 You can determine the version that is being used at runtime with:
 
 ```py
-import do_gradientai
-print(do_gradientai.__version__)
+import gradientai
+print(gradientai.__version__)
 ```
 
 ## Requirements
diff --git a/api.md b/api.md
index 78a81061..9a2dd757 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
 # Shared Types
 
 ```python
-from do_gradientai.types import APILinks, APIMeta
+from gradientai.types import APILinks, APIMeta
 ```
 
 # Agents
@@ -9,7 +9,7 @@
 Types:
 
 ```python
-from do_gradientai.types import (
+from gradientai.types import (
     APIAgent,
     APIAgentAPIKeyInfo,
     APIAgentModel,
@@ -29,19 +29,19 @@ from do_gradientai.types import (
 
 Methods:
 
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
 
 ## APIKeys
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
     APIKeyListResponse,
@@ -52,30 +52,30 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
 
 ## EvaluationMetrics
 
 Types:
 
 ```python
-from do_gradientai.types.agents import EvaluationMetricListResponse
+from gradientai.types.agents import EvaluationMetricListResponse
 ```
 
 Methods:
 
-- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
 
 ### Workspaces
 
 Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics import (
+- from gradientai.types.agents.evaluation_metrics import (
     WorkspaceCreateResponse,
     WorkspaceRetrieveResponse,
     WorkspaceUpdateResponse,
@@ -87,19 +87,19 @@ from do_gradientai.types.agents.evaluation_metrics import (
 
 Methods:
 
-- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
-- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
-- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
-- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
-- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
-- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
 
 #### Agents
 
 Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics.workspaces import (
+from gradientai.types.agents.evaluation_metrics.workspaces import (
     AgentListResponse,
     AgentMoveResponse,
 )
@@ -107,15 +107,15 @@ from do_gradientai.types.agents.evaluation_metrics.workspaces import (
 
 Methods:
 
-- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
-- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
 
 ## EvaluationRuns
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIEvaluationMetric,
     APIEvaluationMetricResult,
     APIEvaluationPrompt,
@@ -129,17 +129,17 @@ Methods:
 
-- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
-- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse
-- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
 
 ## EvaluationTestCases
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIEvaluationTestCase,
     APIStarMetric,
     EvaluationTestCaseCreateResponse,
@@ -152,18 +152,18 @@
 Methods:
 
-- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
-- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
 
 ## EvaluationDatasets
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
@@ -171,15 +171,15 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
 
 ## Functions
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionUpdateResponse,
     FunctionDeleteResponse,
@@ -188,43 +188,43 @@
 Methods:
 
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
 
 ## Versions
 
 Types:
 
 ```python
-from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse
+from gradientai.types.agents import VersionUpdateResponse, VersionListResponse
 ```
 
 Methods:
 
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
 
 ## KnowledgeBases
 
 Types:
 
 ```python
-from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 ```
 
 Methods:
 
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
 
 ## Routes
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     RouteUpdateResponse,
     RouteDeleteResponse,
     RouteAddResponse,
@@ -234,10 +234,10 @@
 Methods:
 
-- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
-- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
-- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
-- client.agents.routes.view(uuid) -> RouteViewResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse
 
 # ModelProviders
 
@@ -248,7 +248,7 @@
 Types:
 
 ```python
-from do_gradientai.types.model_providers.anthropic import (
+from gradientai.types.model_providers.anthropic import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -260,12 +260,12 @@
 Methods:
 
-- client.model_providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.model_providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.model_providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.model_providers.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.model_providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.model_providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.model_providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.model_providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.model_providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.model_providers.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.model_providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.model_providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
 
 ## OpenAI
 
@@ -274,7 +274,7 @@
 Types:
 
 ```python
-from do_gradientai.types.model_providers.openai import (
+from gradientai.types.model_providers.openai import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -286,31 +286,31 @@
 Methods:
 
-- client.model_providers.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.model_providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.model_providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.model_providers.openai.keys.list(\*\*params) -> KeyListResponse
-- client.model_providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.model_providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
+- client.model_providers.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.model_providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.model_providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.model_providers.openai.keys.list(\*\*params) -> KeyListResponse
+- client.model_providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.model_providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
 
 # Regions
 
 Types:
 
 ```python
-from do_gradientai.types import RegionListResponse
+from gradientai.types import RegionListResponse
 ```
 
 Methods:
 
-- client.regions.list(\*\*params) -> RegionListResponse
+- client.regions.list(\*\*params) -> RegionListResponse
 
 # KnowledgeBases
 
 Types:
 
 ```python
-from do_gradientai.types import (
+from gradientai.types import (
     APIKnowledgeBase,
     KnowledgeBaseCreateResponse,
     KnowledgeBaseRetrieveResponse,
@@ -322,18 +322,18 @@
 Methods:
 
-- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
-- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
-- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
-- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
-- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
+- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
+- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
+- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
+- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
 
 ## DataSources
 
 Types:
 
 ```python
-from do_gradientai.types.knowledge_bases import (
+from gradientai.types.knowledge_bases import (
     APIFileUploadDataSource,
     APIKnowledgeBaseDataSource,
     APISpacesDataSource,
@@ -347,16 +347,16 @@
 Methods:
 
-- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
-- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
-- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
+- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
+- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
 
 ## IndexingJobs
 
 Types:
 
 ```python
-from do_gradientai.types.knowledge_bases import (
+from gradientai.types.knowledge_bases import (
     APIIndexedDataSource,
     APIIndexingJob,
     IndexingJobCreateResponse,
@@ -369,11 +369,11 @@
 Methods:
 
-- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
-- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
-- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
-- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
-- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
 
 # Chat
 
@@ -382,12 +382,12 @@
 Types:
 
 ```python
-from do_gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
+from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
 ```
 
 Methods:
 
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
 
 # Inference
 
@@ -396,7 +396,7 @@
 Types:
 
 ```python
-from do_gradientai.types.inference import (
+from gradientai.types.inference import (
     APIModelAPIKeyInfo,
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
@@ -408,21 +408,21 @@
 Methods:
 
-- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
-- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
-- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
-- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
+- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
+- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
+- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
+- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
 
 # Models
 
 Types:
 
 ```python
-from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, Model, ModelListResponse
+from gradientai.types import APIAgreement, APIModel, APIModelVersion, Model, ModelListResponse
 ```
 
 Methods:
 
-- client.models.retrieve(model) -> Model
-- client.models.list() -> ModelListResponse
+- client.models.retrieve(model) -> Model
+- client.models.list() -> ModelListResponse
diff --git a/mypy.ini b/mypy.ini
index 82b0c891..748d8234 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -8,7 +8,7 @@ show_error_codes = True
 #
 # We also exclude our `tests` as mypy doesn't always infer
 # types correctly and Pyright will still catch any type errors.
-exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$
+exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$
 
 strict_equality = True
 implicit_reexport = True
diff --git a/pyproject.toml b/pyproject.toml
index 5e38e921..76b45796 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -78,14 +78,14 @@ format = { chain = [
 
 "check:ruff" = "ruff check ."
 "fix:ruff" = "ruff check --fix ."
 
-"check:importable" = "python -c 'import do_gradientai'"
+"check:importable" = "python -c 'import gradientai'"
 
 typecheck = { chain = [
   "typecheck:pyright",
   "typecheck:mypy"
 ]}
 "typecheck:pyright" = "pyright"
-"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal"
+"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal"
 "typecheck:mypy" = "mypy ."
 [build-system]
@@ -98,7 +98,7 @@ include = [
 ]
 
 [tool.hatch.build.targets.wheel]
-packages = ["src/do_gradientai"]
+packages = ["src/gradientai"]
 
 [tool.hatch.build.targets.sdist]
 # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc)
@@ -201,7 +201,7 @@ length-sort = true
 length-sort-straight = true
 combine-as-imports = true
 extra-standard-library = ["typing_extensions"]
-known-first-party = ["do_gradientai", "tests"]
+known-first-party = ["gradientai", "tests"]
 
 [tool.ruff.lint.per-file-ignores]
 "bin/**.py" = ["T201", "T203"]
diff --git a/release-please-config.json b/release-please-config.json
index a320c1a8..2ff9a58c 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -61,6 +61,6 @@
   ],
   "release-type": "python",
   "extra-files": [
-    "src/do_gradientai/_version.py"
+    "src/gradientai/_version.py"
   ]
 }
\ No newline at end of file
diff --git a/scripts/lint b/scripts/lint
index e46e909b..37b38f6f 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -8,4 +8,4 @@ echo "==> Running lints"
 rye run lint
 
 echo "==> Making sure it imports"
-rye run python -c 'import do_gradientai'
+rye run python -c 'import gradientai'
diff --git a/src/do_gradientai/__init__.py b/src/gradientai/__init__.py
similarity index 95%
rename from src/do_gradientai/__init__.py
rename to src/gradientai/__init__.py
index 41b943b2..3316fe47 100644
--- a/src/do_gradientai/__init__.py
+++ b/src/gradientai/__init__.py
@@ -89,12 +89,12 @@
 # Update the __module__ attribute for exported symbols so that
 # error messages point to this module instead of the module
 # it was originally defined in, e.g.
-# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError
+# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError
 __locals = locals()
 for __name in __all__:
     if not __name.startswith("__"):
         try:
-            __locals[__name].__module__ = "do_gradientai"
+            __locals[__name].__module__ = "gradientai"
         except (TypeError, AttributeError):
             # Some of our exported symbols are builtins which we can't set attributes for.
             pass
diff --git a/src/do_gradientai/_base_client.py b/src/gradientai/_base_client.py
similarity index 99%
rename from src/do_gradientai/_base_client.py
rename to src/gradientai/_base_client.py
index 30108c9d..6dce600b 100644
--- a/src/do_gradientai/_base_client.py
+++ b/src/gradientai/_base_client.py
@@ -389,7 +389,7 @@ def __init__(
 
         if max_retries is None:  # pyright: ignore[reportUnnecessaryComparison]
             raise TypeError(
-                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`"
+                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`"
             )
 
     def _enforce_trailing_slash(self, url: URL) -> URL:
diff --git a/src/do_gradientai/_client.py b/src/gradientai/_client.py
similarity index 100%
rename from src/do_gradientai/_client.py
rename to src/gradientai/_client.py
diff --git a/src/do_gradientai/_compat.py b/src/gradientai/_compat.py
similarity index 100%
rename from src/do_gradientai/_compat.py
rename to src/gradientai/_compat.py
diff --git a/src/do_gradientai/_constants.py b/src/gradientai/_constants.py
similarity index 100%
rename from src/do_gradientai/_constants.py
rename to src/gradientai/_constants.py
diff --git a/src/do_gradientai/_exceptions.py b/src/gradientai/_exceptions.py
similarity index 100%
rename from src/do_gradientai/_exceptions.py
rename to src/gradientai/_exceptions.py
diff --git a/src/do_gradientai/_files.py b/src/gradientai/_files.py
similarity index 100%
rename from src/do_gradientai/_files.py
rename to src/gradientai/_files.py
diff --git a/src/do_gradientai/_models.py b/src/gradientai/_models.py
similarity index 100%
rename from src/do_gradientai/_models.py
rename to src/gradientai/_models.py
diff --git a/src/do_gradientai/_qs.py b/src/gradientai/_qs.py
similarity index 100%
rename from src/do_gradientai/_qs.py
rename to src/gradientai/_qs.py
diff --git a/src/do_gradientai/_resource.py b/src/gradientai/_resource.py
similarity index 100%
rename from src/do_gradientai/_resource.py
rename to src/gradientai/_resource.py
diff --git a/src/do_gradientai/_response.py b/src/gradientai/_response.py
similarity index 99%
rename from src/do_gradientai/_response.py
rename to src/gradientai/_response.py
index 8ca43971..2037e4ca 100644
--- a/src/do_gradientai/_response.py
+++ b/src/gradientai/_response.py
@@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
             and issubclass(origin, pydantic.BaseModel)
         ):
             raise TypeError(
-                "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`"
+                "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`"
             )
 
         if (
@@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
         the `to` argument, e.g.
 
         ```py
-        from do_gradientai import BaseModel
+        from gradientai import BaseModel
 
 
         class MyModel(BaseModel):
@@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T:
         the `to` argument, e.g.
 
         ```py
-        from do_gradientai import BaseModel
+        from gradientai import BaseModel
 
 
         class MyModel(BaseModel):
@@ -558,7 +558,7 @@ async def stream_to_file(
 class MissingStreamClassError(TypeError):
     def __init__(self) -> None:
         super().__init__(
-            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `do_gradientai._streaming` for reference",
+            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference",
         )
diff --git a/src/do_gradientai/_streaming.py b/src/gradientai/_streaming.py
similarity index 100%
rename from src/do_gradientai/_streaming.py
rename to src/gradientai/_streaming.py
diff --git a/src/do_gradientai/_types.py b/src/gradientai/_types.py
similarity index 99%
rename from src/do_gradientai/_types.py
rename to src/gradientai/_types.py
index c356c700..1bac876d 100644
--- a/src/do_gradientai/_types.py
+++ b/src/gradientai/_types.py
@@ -81,7 +81,7 @@
 # This unfortunately means that you will either have
 # to import this type and pass it explicitly:
 #
-# from do_gradientai import NoneType
+# from gradientai import NoneType
 # client.get('/foo', cast_to=NoneType)
 #
 # or build it yourself:
diff --git a/src/do_gradientai/_utils/__init__.py b/src/gradientai/_utils/__init__.py
similarity index 100%
rename from src/do_gradientai/_utils/__init__.py
rename to src/gradientai/_utils/__init__.py
diff --git a/src/do_gradientai/_utils/_logs.py b/src/gradientai/_utils/_logs.py
similarity index 75%
rename from src/do_gradientai/_utils/_logs.py
rename to src/gradientai/_utils/_logs.py
index ac45b1a5..9047e5c8 100644
--- a/src/do_gradientai/_utils/_logs.py
+++ b/src/gradientai/_utils/_logs.py
@@ -1,12 +1,12 @@
 import os
 import logging
 
-logger: logging.Logger = logging.getLogger("do_gradientai")
+logger: logging.Logger = logging.getLogger("gradientai")
 httpx_logger: logging.Logger = logging.getLogger("httpx")
 
 
 def _basic_config() -> None:
-    # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+    # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
     logging.basicConfig(
         format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
         datefmt="%Y-%m-%d %H:%M:%S",
diff --git a/src/do_gradientai/_utils/_proxy.py b/src/gradientai/_utils/_proxy.py
similarity index 100%
rename from src/do_gradientai/_utils/_proxy.py
rename to src/gradientai/_utils/_proxy.py
diff --git a/src/do_gradientai/_utils/_reflection.py b/src/gradientai/_utils/_reflection.py
similarity index 100%
rename from src/do_gradientai/_utils/_reflection.py
rename to src/gradientai/_utils/_reflection.py
diff --git a/src/do_gradientai/_utils/_resources_proxy.py b/src/gradientai/_utils/_resources_proxy.py
similarity index 50%
rename from src/do_gradientai/_utils/_resources_proxy.py
rename to src/gradientai/_utils/_resources_proxy.py
index 03763c3b..b3bc4931 100644
--- a/src/do_gradientai/_utils/_resources_proxy.py
+++ b/src/gradientai/_utils/_resources_proxy.py
@@ -7,17 +7,17 @@
 class ResourcesProxy(LazyProxy[Any]):
-    """A proxy for the `do_gradientai.resources` module.
-    This is used so that we can lazily import `do_gradientai.resources` only when
-    needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources`
+    """A proxy for the `gradientai.resources` module.
+    This is used so that we can lazily import `gradientai.resources` only when
+    needed *and* so that users can just import `gradientai` and reference `gradientai.resources`
     """
 
     @override
     def __load__(self) -> Any:
         import importlib
 
-        mod = importlib.import_module("do_gradientai.resources")
+        mod = importlib.import_module("gradientai.resources")
 
         return mod
diff --git a/src/do_gradientai/_utils/_streams.py b/src/gradientai/_utils/_streams.py
similarity index 100%
rename from src/do_gradientai/_utils/_streams.py
rename to src/gradientai/_utils/_streams.py
diff --git a/src/do_gradientai/_utils/_sync.py b/src/gradientai/_utils/_sync.py
similarity index 100%
rename from src/do_gradientai/_utils/_sync.py
rename to src/gradientai/_utils/_sync.py
diff --git a/src/do_gradientai/_utils/_transform.py b/src/gradientai/_utils/_transform.py
similarity index 100%
rename from src/do_gradientai/_utils/_transform.py
rename to src/gradientai/_utils/_transform.py
diff --git a/src/do_gradientai/_utils/_typing.py b/src/gradientai/_utils/_typing.py
similarity index 100%
rename from src/do_gradientai/_utils/_typing.py
rename to src/gradientai/_utils/_typing.py
diff --git a/src/do_gradientai/_utils/_utils.py b/src/gradientai/_utils/_utils.py
similarity index 100%
rename from src/do_gradientai/_utils/_utils.py
rename to src/gradientai/_utils/_utils.py
diff --git a/src/do_gradientai/_version.py b/src/gradientai/_version.py
similarity index 83%
rename from src/do_gradientai/_version.py
rename to src/gradientai/_version.py
index 12e8e17e..a9dcbaa5 100644
--- a/src/do_gradientai/_version.py
+++ b/src/gradientai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-__title__ = "do_gradientai" +__title__ = "gradientai" __version__ = "0.1.0-alpha.5" # x-release-please-version diff --git a/src/do_gradientai/py.typed b/src/gradientai/py.typed similarity index 100% rename from src/do_gradientai/py.typed rename to src/gradientai/py.typed diff --git a/src/do_gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py similarity index 100% rename from src/do_gradientai/resources/__init__.py rename to src/gradientai/resources/__init__.py diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/__init__.py rename to src/gradientai/resources/agents/__init__.py diff --git a/src/do_gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/agents.py rename to src/gradientai/resources/agents/agents.py diff --git a/src/do_gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py similarity index 100% rename from src/do_gradientai/resources/agents/api_keys.py rename to src/gradientai/resources/agents/api_keys.py diff --git a/src/do_gradientai/resources/agents/evaluation_datasets.py b/src/gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_datasets.py rename to src/gradientai/resources/agents/evaluation_datasets.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_runs.py rename to src/gradientai/resources/agents/evaluation_runs.py diff --git 
a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_test_cases.py rename to src/gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/do_gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py similarity index 100% rename from src/do_gradientai/resources/agents/functions.py rename to src/gradientai/resources/agents/functions.py diff --git a/src/do_gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/agents/knowledge_bases.py rename to src/gradientai/resources/agents/knowledge_bases.py diff --git a/src/do_gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py similarity index 100% rename from src/do_gradientai/resources/agents/routes.py rename to src/gradientai/resources/agents/routes.py diff --git a/src/do_gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 100% rename from src/do_gradientai/resources/agents/versions.py rename to src/gradientai/resources/agents/versions.py diff --git a/src/do_gradientai/resources/chat/__init__.py b/src/gradientai/resources/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/chat/__init__.py rename to src/gradientai/resources/chat/__init__.py diff --git a/src/do_gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py similarity index 100% rename from src/do_gradientai/resources/chat/chat.py rename to src/gradientai/resources/chat/chat.py diff --git a/src/do_gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py similarity index 100% rename from src/do_gradientai/resources/chat/completions.py rename to src/gradientai/resources/chat/completions.py diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/gradientai/resources/inference/__init__.py similarity index 100% rename from src/do_gradientai/resources/inference/__init__.py rename to src/gradientai/resources/inference/__init__.py diff --git a/src/do_gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py similarity index 100% rename from src/do_gradientai/resources/inference/api_keys.py rename to src/gradientai/resources/inference/api_keys.py diff --git a/src/do_gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py similarity index 100% rename from src/do_gradientai/resources/inference/inference.py rename to src/gradientai/resources/inference/inference.py diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/__init__.py rename to src/gradientai/resources/knowledge_bases/__init__.py diff --git a/src/do_gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/data_sources.py rename to src/gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/indexing_jobs.py rename to 
src/gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/do_gradientai/resources/model_providers/__init__.py b/src/gradientai/resources/model_providers/__init__.py similarity index 100% rename from src/do_gradientai/resources/model_providers/__init__.py rename to src/gradientai/resources/model_providers/__init__.py diff --git a/src/do_gradientai/resources/model_providers/anthropic/__init__.py b/src/gradientai/resources/model_providers/anthropic/__init__.py similarity index 100% rename from src/do_gradientai/resources/model_providers/anthropic/__init__.py rename to src/gradientai/resources/model_providers/anthropic/__init__.py diff --git a/src/do_gradientai/resources/model_providers/anthropic/anthropic.py b/src/gradientai/resources/model_providers/anthropic/anthropic.py similarity index 100% rename from src/do_gradientai/resources/model_providers/anthropic/anthropic.py rename to src/gradientai/resources/model_providers/anthropic/anthropic.py diff --git a/src/do_gradientai/resources/model_providers/anthropic/keys.py b/src/gradientai/resources/model_providers/anthropic/keys.py similarity index 100% rename from src/do_gradientai/resources/model_providers/anthropic/keys.py rename to src/gradientai/resources/model_providers/anthropic/keys.py diff --git a/src/do_gradientai/resources/model_providers/model_providers.py b/src/gradientai/resources/model_providers/model_providers.py similarity index 100% rename from src/do_gradientai/resources/model_providers/model_providers.py rename to src/gradientai/resources/model_providers/model_providers.py diff --git a/src/do_gradientai/resources/model_providers/openai/__init__.py b/src/gradientai/resources/model_providers/openai/__init__.py similarity index 100% rename from src/do_gradientai/resources/model_providers/openai/__init__.py rename to src/gradientai/resources/model_providers/openai/__init__.py diff --git a/src/do_gradientai/resources/model_providers/openai/keys.py b/src/gradientai/resources/model_providers/openai/keys.py similarity index 100% rename from src/do_gradientai/resources/model_providers/openai/keys.py rename to src/gradientai/resources/model_providers/openai/keys.py diff --git a/src/do_gradientai/resources/model_providers/openai/openai.py b/src/gradientai/resources/model_providers/openai/openai.py similarity index 100% rename from src/do_gradientai/resources/model_providers/openai/openai.py rename to src/gradientai/resources/model_providers/openai/openai.py diff --git a/src/do_gradientai/resources/models.py b/src/gradientai/resources/models.py similarity index 100% rename from src/do_gradientai/resources/models.py rename to src/gradientai/resources/models.py diff --git a/src/do_gradientai/resources/regions.py b/src/gradientai/resources/regions.py similarity index 100% rename from src/do_gradientai/resources/regions.py rename to src/gradientai/resources/regions.py diff --git a/src/do_gradientai/types/__init__.py b/src/gradientai/types/__init__.py similarity index 100% rename from src/do_gradientai/types/__init__.py rename to src/gradientai/types/__init__.py diff --git a/src/do_gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py similarity index 100% rename from 
src/do_gradientai/types/agent_create_params.py rename to src/gradientai/types/agent_create_params.py diff --git a/src/do_gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py similarity index 100% rename from src/do_gradientai/types/agent_create_response.py rename to src/gradientai/types/agent_create_response.py diff --git a/src/do_gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py similarity index 100% rename from src/do_gradientai/types/agent_delete_response.py rename to src/gradientai/types/agent_delete_response.py diff --git a/src/do_gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py similarity index 100% rename from src/do_gradientai/types/agent_list_params.py rename to src/gradientai/types/agent_list_params.py diff --git a/src/do_gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py similarity index 100% rename from src/do_gradientai/types/agent_list_response.py rename to src/gradientai/types/agent_list_response.py diff --git a/src/do_gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agent_retrieve_response.py rename to src/gradientai/types/agent_retrieve_response.py diff --git a/src/do_gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py similarity index 100% rename from src/do_gradientai/types/agent_update_params.py rename to src/gradientai/types/agent_update_params.py diff --git a/src/do_gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py similarity index 100% rename from src/do_gradientai/types/agent_update_response.py rename to src/gradientai/types/agent_update_response.py diff --git a/src/do_gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py similarity index 100% rename from src/do_gradientai/types/agent_update_status_params.py rename to src/gradientai/types/agent_update_status_params.py diff --git a/src/do_gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py similarity index 100% rename from src/do_gradientai/types/agent_update_status_response.py rename to src/gradientai/types/agent_update_status_response.py diff --git a/src/do_gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/__init__.py rename to src/gradientai/types/agents/__init__.py diff --git a/src/do_gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_metric.py rename to src/gradientai/types/agents/api_evaluation_metric.py diff --git a/src/do_gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_metric_result.py rename to src/gradientai/types/agents/api_evaluation_metric_result.py diff --git a/src/do_gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_prompt.py rename to src/gradientai/types/agents/api_evaluation_prompt.py diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py 
b/src/gradientai/types/agents/api_evaluation_run.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_run.py rename to src/gradientai/types/agents/api_evaluation_run.py diff --git a/src/do_gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_test_case.py rename to src/gradientai/types/agents/api_evaluation_test_case.py diff --git a/src/do_gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_create_params.py rename to src/gradientai/types/agents/api_key_create_params.py diff --git a/src/do_gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_create_response.py rename to src/gradientai/types/agents/api_key_create_response.py diff --git a/src/do_gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_delete_response.py rename to src/gradientai/types/agents/api_key_delete_response.py diff --git a/src/do_gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_list_params.py rename to src/gradientai/types/agents/api_key_list_params.py diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_list_response.py rename to src/gradientai/types/agents/api_key_list_response.py diff --git a/src/do_gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_regenerate_response.py rename to src/gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/do_gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_update_params.py rename to src/gradientai/types/agents/api_key_update_params.py diff --git a/src/do_gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_update_response.py rename to src/gradientai/types/agents/api_key_update_response.py diff --git a/src/do_gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/do_gradientai/types/agents/api_link_knowledge_base_output.py rename to src/gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/do_gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/do_gradientai/types/agents/api_star_metric.py rename to src/gradientai/types/agents/api_star_metric.py diff --git a/src/do_gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/do_gradientai/types/agents/api_star_metric_param.py rename to 
src/gradientai/types/agents/api_star_metric_param.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_params.py b/src/gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_params.py rename to src/gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_response.py b/src/gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_response.py rename to src/gradientai/types/agents/evaluation_dataset_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_response.py b/src/gradientai/types/agents/evaluation_metric_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metric_list_response.py rename to src/gradientai/types/agents/evaluation_metric_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/__init__.py rename to src/gradientai/types/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py similarity index 100% rename from 
src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py rename to 
src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_create_params.py rename to src/gradientai/types/agents/evaluation_run_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/gradientai/types/agents/evaluation_run_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_create_response.py rename to src/gradientai/types/agents/evaluation_run_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradientai/types/agents/evaluation_run_list_results_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_list_results_response.py rename to src/gradientai/types/agents/evaluation_run_list_results_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_retrieve_response.py rename to src/gradientai/types/agents/evaluation_run_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py rename to src/gradientai/types/agents/evaluation_run_retrieve_results_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_params.py b/src/gradientai/types/agents/evaluation_test_case_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_create_params.py rename to src/gradientai/types/agents/evaluation_test_case_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_response.py b/src/gradientai/types/agents/evaluation_test_case_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_create_response.py rename to src/gradientai/types/agents/evaluation_test_case_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_response.py rename to src/gradientai/types/agents/evaluation_test_case_list_response.py diff --git 
a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py rename to src/gradientai/types/agents/evaluation_test_case_retrieve_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py rename to src/gradientai/types/agents/evaluation_test_case_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_update_params.py rename to src/gradientai/types/agents/evaluation_test_case_update_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_response.py b/src/gradientai/types/agents/evaluation_test_case_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_update_response.py rename to src/gradientai/types/agents/evaluation_test_case_update_response.py diff --git a/src/do_gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/function_create_params.py rename to src/gradientai/types/agents/function_create_params.py diff --git a/src/do_gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_create_response.py rename to src/gradientai/types/agents/function_create_response.py diff --git a/src/do_gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_delete_response.py rename to src/gradientai/types/agents/function_delete_response.py diff --git a/src/do_gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/function_update_params.py rename to src/gradientai/types/agents/function_update_params.py diff --git a/src/do_gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_update_response.py rename to src/gradientai/types/agents/function_update_response.py diff --git a/src/do_gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/do_gradientai/types/agents/knowledge_base_detach_response.py rename to src/gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/do_gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py similarity index 100% rename from src/do_gradientai/types/agents/route_add_params.py rename to src/gradientai/types/agents/route_add_params.py diff --git a/src/do_gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_add_response.py rename to 
src/gradientai/types/agents/route_add_response.py diff --git a/src/do_gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_delete_response.py rename to src/gradientai/types/agents/route_delete_response.py diff --git a/src/do_gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/route_update_params.py rename to src/gradientai/types/agents/route_update_params.py diff --git a/src/do_gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_update_response.py rename to src/gradientai/types/agents/route_update_response.py diff --git a/src/do_gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_view_response.py rename to src/gradientai/types/agents/route_view_response.py diff --git a/src/do_gradientai/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/version_list_params.py rename to src/gradientai/types/agents/version_list_params.py diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/version_list_response.py rename to src/gradientai/types/agents/version_list_response.py diff --git a/src/do_gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/version_update_params.py rename to src/gradientai/types/agents/version_update_params.py diff --git a/src/do_gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/version_update_response.py rename to src/gradientai/types/agents/version_update_response.py diff --git a/src/do_gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py similarity index 100% rename from src/do_gradientai/types/api_agent.py rename to src/gradientai/types/api_agent.py diff --git a/src/do_gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_agent_api_key_info.py rename to src/gradientai/types/api_agent_api_key_info.py diff --git a/src/do_gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py similarity index 100% rename from src/do_gradientai/types/api_agent_model.py rename to src/gradientai/types/api_agent_model.py diff --git a/src/do_gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py similarity index 100% rename from src/do_gradientai/types/api_agreement.py rename to src/gradientai/types/api_agreement.py diff --git a/src/do_gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_anthropic_api_key_info.py rename to src/gradientai/types/api_anthropic_api_key_info.py diff --git a/src/do_gradientai/types/api_deployment_visibility.py b/src/gradientai/types/api_deployment_visibility.py similarity index 100% 
rename from src/do_gradientai/types/api_deployment_visibility.py rename to src/gradientai/types/api_deployment_visibility.py diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py similarity index 100% rename from src/do_gradientai/types/api_knowledge_base.py rename to src/gradientai/types/api_knowledge_base.py diff --git a/src/do_gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py similarity index 100% rename from src/do_gradientai/types/api_model_version.py rename to src/gradientai/types/api_model_version.py diff --git a/src/do_gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_openai_api_key_info.py rename to src/gradientai/types/api_openai_api_key_info.py diff --git a/src/do_gradientai/types/api_retrieval_method.py b/src/gradientai/types/api_retrieval_method.py similarity index 100% rename from src/do_gradientai/types/api_retrieval_method.py rename to src/gradientai/types/api_retrieval_method.py diff --git a/src/do_gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py similarity index 100% rename from src/do_gradientai/types/api_workspace.py rename to src/gradientai/types/api_workspace.py diff --git a/src/do_gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py similarity index 100% rename from src/do_gradientai/types/chat/__init__.py rename to src/gradientai/types/chat/__init__.py diff --git a/src/do_gradientai/types/chat/chat_completion_token_logprob.py b/src/gradientai/types/chat/chat_completion_token_logprob.py similarity index 100% rename from src/do_gradientai/types/chat/chat_completion_token_logprob.py rename to src/gradientai/types/chat/chat_completion_token_logprob.py diff --git a/src/do_gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py similarity index 100% rename from src/do_gradientai/types/chat/completion_create_params.py rename to src/gradientai/types/chat/completion_create_params.py diff --git a/src/do_gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py similarity index 100% rename from src/do_gradientai/types/chat/completion_create_response.py rename to src/gradientai/types/chat/completion_create_response.py diff --git a/src/do_gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py similarity index 100% rename from src/do_gradientai/types/inference/__init__.py rename to src/gradientai/types/inference/__init__.py diff --git a/src/do_gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_params.py rename to src/gradientai/types/inference/api_key_create_params.py diff --git a/src/do_gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_response.py rename to src/gradientai/types/inference/api_key_create_response.py diff --git a/src/do_gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_delete_response.py rename to src/gradientai/types/inference/api_key_delete_response.py diff --git 
a/src/do_gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_params.py rename to src/gradientai/types/inference/api_key_list_params.py diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_response.py rename to src/gradientai/types/inference/api_key_list_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_params.py rename to src/gradientai/types/inference/api_key_update_params.py diff --git a/src/do_gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_regenerate_response.py rename to src/gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_response.py rename to src/gradientai/types/inference/api_key_update_response.py diff --git a/src/do_gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py similarity index 100% rename from src/do_gradientai/types/inference/api_model_api_key_info.py rename to src/gradientai/types/inference/api_model_api_key_info.py diff --git a/src/do_gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_create_params.py rename to src/gradientai/types/knowledge_base_create_params.py diff --git a/src/do_gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_create_response.py rename to src/gradientai/types/knowledge_base_create_response.py diff --git a/src/do_gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_delete_response.py rename to src/gradientai/types/knowledge_base_delete_response.py diff --git a/src/do_gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_list_params.py rename to src/gradientai/types/knowledge_base_list_params.py diff --git a/src/do_gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_list_response.py rename to src/gradientai/types/knowledge_base_list_response.py diff --git a/src/do_gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_retrieve_response.py rename to src/gradientai/types/knowledge_base_retrieve_response.py diff --git a/src/do_gradientai/types/knowledge_base_update_params.py 
b/src/gradientai/types/knowledge_base_update_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_update_params.py rename to src/gradientai/types/knowledge_base_update_params.py diff --git a/src/do_gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_update_response.py rename to src/gradientai/types/knowledge_base_update_response.py diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/__init__.py rename to src/gradientai/types/knowledge_bases/__init__.py diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py rename to src/gradientai/types/knowledge_bases/api_file_upload_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py rename to src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py rename to src/gradientai/types/knowledge_bases/api_indexed_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_indexing_job.py rename to src/gradientai/types/knowledge_bases/api_indexing_job.py diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py rename to src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py rename to src/gradientai/types/knowledge_bases/api_spaces_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py rename to src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py rename to src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py 
b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py rename to src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/aws_data_source_param.py rename to src/gradientai/types/knowledge_bases/aws_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_create_params.py rename to src/gradientai/types/knowledge_bases/data_source_create_params.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_create_response.py rename to src/gradientai/types/knowledge_bases/data_source_create_response.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_delete_response.py rename to src/gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_list_params.py rename to src/gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_list_response.py rename to src/gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py rename to 
src/gradientai/types/knowledge_bases/indexing_job_list_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py diff --git a/src/do_gradientai/types/model.py b/src/gradientai/types/model.py similarity index 100% rename from src/do_gradientai/types/model.py rename to src/gradientai/types/model.py diff --git a/src/do_gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py similarity index 100% rename from src/do_gradientai/types/model_list_response.py rename to src/gradientai/types/model_list_response.py diff --git a/src/do_gradientai/types/model_providers/__init__.py b/src/gradientai/types/model_providers/__init__.py similarity index 100% rename from src/do_gradientai/types/model_providers/__init__.py rename to src/gradientai/types/model_providers/__init__.py diff --git a/src/do_gradientai/types/model_providers/anthropic/__init__.py b/src/gradientai/types/model_providers/anthropic/__init__.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/__init__.py rename to src/gradientai/types/model_providers/anthropic/__init__.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_create_params.py b/src/gradientai/types/model_providers/anthropic/key_create_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_create_params.py rename to src/gradientai/types/model_providers/anthropic/key_create_params.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_create_response.py b/src/gradientai/types/model_providers/anthropic/key_create_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_create_response.py rename to src/gradientai/types/model_providers/anthropic/key_create_response.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_delete_response.py b/src/gradientai/types/model_providers/anthropic/key_delete_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_delete_response.py rename to 
src/gradientai/types/model_providers/anthropic/key_delete_response.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py b/src/gradientai/types/model_providers/anthropic/key_list_agents_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py rename to src/gradientai/types/model_providers/anthropic/key_list_agents_params.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py b/src/gradientai/types/model_providers/anthropic/key_list_agents_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py rename to src/gradientai/types/model_providers/anthropic/key_list_agents_response.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_params.py b/src/gradientai/types/model_providers/anthropic/key_list_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_list_params.py rename to src/gradientai/types/model_providers/anthropic/key_list_params.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_response.py b/src/gradientai/types/model_providers/anthropic/key_list_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_list_response.py rename to src/gradientai/types/model_providers/anthropic/key_list_response.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py b/src/gradientai/types/model_providers/anthropic/key_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py rename to src/gradientai/types/model_providers/anthropic/key_retrieve_response.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_update_params.py b/src/gradientai/types/model_providers/anthropic/key_update_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_update_params.py rename to src/gradientai/types/model_providers/anthropic/key_update_params.py diff --git a/src/do_gradientai/types/model_providers/anthropic/key_update_response.py b/src/gradientai/types/model_providers/anthropic/key_update_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/anthropic/key_update_response.py rename to src/gradientai/types/model_providers/anthropic/key_update_response.py diff --git a/src/do_gradientai/types/model_providers/openai/__init__.py b/src/gradientai/types/model_providers/openai/__init__.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/__init__.py rename to src/gradientai/types/model_providers/openai/__init__.py diff --git a/src/do_gradientai/types/model_providers/openai/key_create_params.py b/src/gradientai/types/model_providers/openai/key_create_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_create_params.py rename to src/gradientai/types/model_providers/openai/key_create_params.py diff --git a/src/do_gradientai/types/model_providers/openai/key_create_response.py b/src/gradientai/types/model_providers/openai/key_create_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_create_response.py rename to src/gradientai/types/model_providers/openai/key_create_response.py diff --git a/src/do_gradientai/types/model_providers/openai/key_delete_response.py 
b/src/gradientai/types/model_providers/openai/key_delete_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_delete_response.py rename to src/gradientai/types/model_providers/openai/key_delete_response.py diff --git a/src/do_gradientai/types/model_providers/openai/key_list_params.py b/src/gradientai/types/model_providers/openai/key_list_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_list_params.py rename to src/gradientai/types/model_providers/openai/key_list_params.py diff --git a/src/do_gradientai/types/model_providers/openai/key_list_response.py b/src/gradientai/types/model_providers/openai/key_list_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_list_response.py rename to src/gradientai/types/model_providers/openai/key_list_response.py diff --git a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py b/src/gradientai/types/model_providers/openai/key_retrieve_agents_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py rename to src/gradientai/types/model_providers/openai/key_retrieve_agents_params.py diff --git a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/model_providers/openai/key_retrieve_agents_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py rename to src/gradientai/types/model_providers/openai/key_retrieve_agents_response.py diff --git a/src/do_gradientai/types/model_providers/openai/key_retrieve_response.py b/src/gradientai/types/model_providers/openai/key_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_retrieve_response.py rename to src/gradientai/types/model_providers/openai/key_retrieve_response.py diff --git a/src/do_gradientai/types/model_providers/openai/key_update_params.py b/src/gradientai/types/model_providers/openai/key_update_params.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_update_params.py rename to src/gradientai/types/model_providers/openai/key_update_params.py diff --git a/src/do_gradientai/types/model_providers/openai/key_update_response.py b/src/gradientai/types/model_providers/openai/key_update_response.py similarity index 100% rename from src/do_gradientai/types/model_providers/openai/key_update_response.py rename to src/gradientai/types/model_providers/openai/key_update_response.py diff --git a/src/do_gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py similarity index 100% rename from src/do_gradientai/types/region_list_params.py rename to src/gradientai/types/region_list_params.py diff --git a/src/do_gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py similarity index 100% rename from src/do_gradientai/types/region_list_response.py rename to src/gradientai/types/region_list_response.py diff --git a/src/do_gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py similarity index 100% rename from src/do_gradientai/types/shared/__init__.py rename to src/gradientai/types/shared/__init__.py diff --git a/src/do_gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py similarity index 100% rename from src/do_gradientai/types/shared/api_links.py rename to 
src/gradientai/types/shared/api_links.py diff --git a/src/do_gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py similarity index 100% rename from src/do_gradientai/types/shared/api_meta.py rename to src/gradientai/types/shared/api_meta.py diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index 42bfa79f..afeaa8f1 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics import ( +from gradientai.types.agents.evaluation_metrics import ( WorkspaceListResponse, WorkspaceCreateResponse, WorkspaceDeleteResponse, diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index e772d668..764e13e0 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics.workspaces import ( +from gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 65351922..beb9666a 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index 9e6dad52..e6ca2644 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py index 82084f61..be83e330 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import EvaluationMetricListResponse +from gradientai.types.agents import EvaluationMetricListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git 
a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 6bd3cfa5..b2fce320 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index 87f66b24..a0b5ee77 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationTestCaseListResponse, EvaluationTestCaseCreateResponse, EvaluationTestCaseUpdateResponse, diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 2c5ceaf7..5a3693cb 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index 0a007840..e62c05ff 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index e2e85ab8..2e6dfd7b 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( RouteAddResponse, RouteViewResponse, RouteDeleteResponse, diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 314cd2e2..79f73672 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from 
gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 62f24534..b4c09579 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index c48a5420..90bf95b9 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.inference import ( +from gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 15665a84..9c466e2f 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 206339e0..8bf1829f 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, diff --git a/tests/api_resources/model_providers/anthropic/test_keys.py b/tests/api_resources/model_providers/anthropic/test_keys.py index b6ba0e9a..fd4ffb0f 100644 --- a/tests/api_resources/model_providers/anthropic/test_keys.py +++ b/tests/api_resources/model_providers/anthropic/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.model_providers.anthropic import ( +from gradientai.types.model_providers.anthropic import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, diff --git a/tests/api_resources/model_providers/openai/test_keys.py b/tests/api_resources/model_providers/openai/test_keys.py index b398f5cc..f0f1eda0 100644 --- a/tests/api_resources/model_providers/openai/test_keys.py +++ b/tests/api_resources/model_providers/openai/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import 
GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.model_providers.openai import ( +from gradientai.types.model_providers.openai import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 74c8cdab..2cc0e080 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 2132cd50..508820ce 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index e1f3457b..afee0c1f 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import Model, ModelListResponse +from gradientai.types import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 4ed5bb27..8e25617f 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import RegionListResponse +from gradientai.types import RegionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/conftest.py b/tests/conftest.py index 6048de1a..39547c5d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,15 +10,15 @@ import pytest from pytest_asyncio import is_async_test -from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient -from do_gradientai._utils import is_dict +from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("do_gradientai").setLevel(logging.DEBUG) +logging.getLogger("gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests diff --git a/tests/test_client.py b/tests/test_client.py index 44dbc938..fc2c1325 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,11 +21,11 @@ from respx import MockRouter from pydantic import ValidationError -from do_gradientai import GradientAI, AsyncGradientAI, 
APIResponseValidationError -from do_gradientai._types import Omit -from do_gradientai._models import BaseModel, FinalRequestOptions -from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError -from do_gradientai._base_client import ( +from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from gradientai._types import Omit +from gradientai._models import BaseModel, FinalRequestOptions +from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -247,10 +247,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "do_gradientai/_legacy_response.py", - "do_gradientai/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "do_gradientai/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -804,7 +804,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -814,7 +814,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -824,7 +824,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -855,7 +855,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) 
@pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -880,7 +880,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1144,10 +1144,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "do_gradientai/_legacy_response.py", - "do_gradientai/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "do_gradientai/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1705,7 +1705,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1717,7 +1717,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1729,7 +1729,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1761,7 +1761,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1787,7 +1787,7 @@ def retry_handler(_request: httpx.Request) -> 
httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -1823,8 +1823,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from do_gradientai._utils import asyncify - from do_gradientai._base_client import get_platform + from gradientai._utils import asyncify + from gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 5a98ce1b..9d1579a8 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from do_gradientai._utils import deepcopy_minimal +from gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index 341e65ae..2905d59c 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from do_gradientai._types import FileTypes -from do_gradientai._utils import extract_files +from gradientai._types import FileTypes +from gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index ff7914bb..4a723313 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from do_gradientai._files import to_httpx_files, async_to_httpx_files +from gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 575dc3af..28aff1f3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from do_gradientai._utils import PropertyInfo -from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from do_gradientai._models import BaseModel, construct_type +from gradientai._utils import PropertyInfo +from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index c9213571..9080377b 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from do_gradientai._qs import Querystring, stringify +from gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 434e9491..c4e6b9d8 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from do_gradientai._utils import required_args +from gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 001ce776..1a8f241e 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from do_gradientai import BaseModel, GradientAI, AsyncGradientAI -from do_gradientai._response import ( +from 
gradientai import BaseModel, GradientAI, AsyncGradientAI +from gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from do_gradientai._streaming import Stream -from do_gradientai._base_client import FinalRequestOptions +from gradientai._streaming import Stream +from gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index c1ce8e85..cdb41a77 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from gradientai import GradientAI, AsyncGradientAI +from gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 30c06d6a..825fe048 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from do_gradientai._types import NOT_GIVEN, Base64FileInput -from do_gradientai._utils import ( +from gradientai._types import NOT_GIVEN, Base64FileInput +from gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from do_gradientai._compat import PYDANTIC_V2 -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2 +from gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 9ce2e0d3..3856b2c9 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from do_gradientai._utils import LazyProxy +from gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index c9129fdc..66ad064f 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from do_gradientai._utils import extract_type_var_from_base +from gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py 
b/tests/utils.py index 9def7c60..b539ed2c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from do_gradientai._types import Omit, NoneType -from do_gradientai._utils import ( +from gradientai._types import Omit, NoneType +from gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From b21050949cb9012dabc8d14bf7dca7301758616c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 21:25:55 +0000 Subject: [PATCH 082/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8285b71..4f9005ea 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.5" + ".": "0.1.0-alpha.6" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 76b45796..ae329889 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.5" +version = "0.1.0-alpha.6" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index a9dcbaa5..b8ef5fc0 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradientai" -__version__ = "0.1.0-alpha.5" # x-release-please-version +__version__ = "0.1.0-alpha.6" # x-release-please-version From 864b328a6d5b2cb2e44cc6e31d2a26562c354b12 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 21:28:56 +0000 Subject: [PATCH 083/200] feat(client): add agent_domain option --- .stats.yml | 2 +- src/gradientai/_client.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index a1e73eb0..b2b8afeb 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 77 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 9c2519464cf5de240e34bd89b9f65706 +config_hash: 80395df07aec0cf06a5aa7caa2e3f4bc diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 0020ed16..327273c9 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -57,12 +57,14 @@ class GradientAI(SyncAPIClient): # client options api_key: str | None inference_key: str | None + agent_domain: str | None def __init__( self, *, api_key: str | None = None, inference_key: str | None = None, + agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -96,6 +98,8 @@ def __init__( inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key + self.agent_domain = agent_domain + if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") self._base_url_overridden = base_url is not None @@ -201,6 +205,7 @@ def copy( *, api_key: str | None = None, inference_key: str | None = None, + agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -236,6 +241,7 @@ def copy( client = self.__class__( api_key=api_key or self.api_key, inference_key=inference_key or self.inference_key, + agent_domain=agent_domain or self.agent_domain, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -289,12 +295,14 @@ class AsyncGradientAI(AsyncAPIClient): # client options api_key: str | None inference_key: str | None + agent_domain: str | None def __init__( self, *, api_key: str | None = None, inference_key: str | None = None, + agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -328,6 +336,8 @@ def __init__( inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key + self.agent_domain = agent_domain + if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") self._base_url_overridden = base_url is not None @@ -433,6 +443,7 @@ def copy( *, api_key: str | None = None, inference_key: str | None = None, + agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -468,6 +479,7 @@ def copy( client = self.__class__( api_key=api_key or self.api_key, inference_key=inference_key or self.inference_key, + agent_domain=agent_domain or 
self.agent_domain, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, From c851cd0140264e27ebc05407ce2e9d65282a5ee2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 22:18:15 +0000 Subject: [PATCH 084/200] feat(api): manual updates Added agent chat completions --- .stats.yml | 2 +- README.md | 8 +- api.md | 14 + src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 32 ++ .../resources/agents/chat/__init__.py | 33 ++ src/gradientai/resources/agents/chat/chat.py | 102 +++++ .../resources/agents/chat/completions.py | 385 ++++++++++++++++++ src/gradientai/types/agents/chat/__init__.py | 6 + .../agents/chat/completion_create_params.py | 185 +++++++++ .../agents/chat/completion_create_response.py | 81 ++++ tests/api_resources/agents/chat/__init__.py | 1 + .../agents/chat/test_completions.py | 186 +++++++++ 13 files changed, 1044 insertions(+), 5 deletions(-) create mode 100644 src/gradientai/resources/agents/chat/__init__.py create mode 100644 src/gradientai/resources/agents/chat/chat.py create mode 100644 src/gradientai/resources/agents/chat/completions.py create mode 100644 src/gradientai/types/agents/chat/__init__.py create mode 100644 src/gradientai/types/agents/chat/completion_create_params.py create mode 100644 src/gradientai/types/agents/chat/completion_create_response.py create mode 100644 tests/api_resources/agents/chat/__init__.py create mode 100644 tests/api_resources/agents/chat/test_completions.py diff --git a/.stats.yml b/.stats.yml index b2b8afeb..063cb477 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 77 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 80395df07aec0cf06a5aa7caa2e3f4bc +config_hash: 7cc5a33eb381780e5290d4871c3b30df diff --git a/README.md b/README.md index b8b2499d..5c9ac183 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) -completion = client.chat.completions.create( +completion = client.agents.chat.completions.create( messages=[ { "content": "string", @@ -63,7 +63,7 @@ client = AsyncGradientAI( async def main() -> None: - completion = await client.chat.completions.create( + completion = await client.agents.chat.completions.create( messages=[ { "content": "string", @@ -105,7 +105,7 @@ async def main() -> None: api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: - completion = await client.chat.completions.create( + completion = await client.agents.chat.completions.create( messages=[ { "content": "string", @@ -138,7 +138,7 @@ from gradientai import GradientAI client = GradientAI() -completion = client.chat.completions.create( +completion = client.agents.chat.completions.create( messages=[ { "content": "string", diff --git a/api.md b/api.md index 9a2dd757..dde6eaf2 100644 --- a/api.md +++ b/api.md @@ -58,6 +58,20 @@ Methods: - client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse - client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +## Chat + +### Completions + +Types: + +```python 
+from gradientai.types.agents.chat import ChatCompletionTokenLogprob, CompletionCreateResponse +``` + +Methods: + +- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse + ## EvaluationMetrics Types: diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index f5423f00..51075283 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -88,6 +96,12 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 0a6e183c..200e9fc0 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -41,6 +41,14 @@ AsyncVersionsResourceWithStreamingResponse, ) from ..._compat import cached_property +from .chat.chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) from .functions import ( FunctionsResource, AsyncFunctionsResource, @@ -114,6 +122,10 @@ class AgentsResource(SyncAPIResource): def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) + @cached_property + def chat(self) -> ChatResource: + return ChatResource(self._client) + @cached_property def evaluation_metrics(self) -> EvaluationMetricsResource: return EvaluationMetricsResource(self._client) @@ -498,6 +510,10 @@ class AsyncAgentsResource(AsyncAPIResource): def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) + @cached_property + def chat(self) -> AsyncChatResource: + return AsyncChatResource(self._client) + @cached_property def evaluation_metrics(self) -> AsyncEvaluationMetricsResource: return AsyncEvaluationMetricsResource(self._client) @@ -904,6 +920,10 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def chat(self) -> ChatResourceWithRawResponse: + return ChatResourceWithRawResponse(self._agents.chat) + @cached_property def evaluation_metrics(self) -> EvaluationMetricsResourceWithRawResponse: return EvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics) @@ -964,6 +984,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def chat(self) -> AsyncChatResourceWithRawResponse: + return AsyncChatResourceWithRawResponse(self._agents.chat) + @cached_property def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithRawResponse: return 
AsyncEvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics) @@ -1024,6 +1048,10 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def chat(self) -> ChatResourceWithStreamingResponse: + return ChatResourceWithStreamingResponse(self._agents.chat) + @cached_property def evaluation_metrics(self) -> EvaluationMetricsResourceWithStreamingResponse: return EvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics) @@ -1084,6 +1112,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def chat(self) -> AsyncChatResourceWithStreamingResponse: + return AsyncChatResourceWithStreamingResponse(self._agents.chat) + @cached_property def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse: return AsyncEvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics) diff --git a/src/gradientai/resources/agents/chat/__init__.py b/src/gradientai/resources/agents/chat/__init__.py new file mode 100644 index 00000000..ec960eb4 --- /dev/null +++ b/src/gradientai/resources/agents/chat/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = [ + "CompletionsResource", + "AsyncCompletionsResource", + "CompletionsResourceWithRawResponse", + "AsyncCompletionsResourceWithRawResponse", + "CompletionsResourceWithStreamingResponse", + "AsyncCompletionsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/agents/chat/chat.py b/src/gradientai/resources/agents/chat/chat.py new file mode 100644 index 00000000..c87bd158 --- /dev/null +++ b/src/gradientai/resources/agents/chat/chat.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ...._compat import cached_property +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["ChatResource", "AsyncChatResource"] + + +class ChatResource(SyncAPIResource): + @cached_property + def completions(self) -> CompletionsResource: + return CompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ChatResourceWithStreamingResponse(self) + + +class AsyncChatResource(AsyncAPIResource): + @cached_property + def completions(self) -> AsyncCompletionsResource: + return AsyncCompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncChatResourceWithStreamingResponse(self) + + +class ChatResourceWithRawResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithRawResponse: + return CompletionsResourceWithRawResponse(self._chat.completions) + + +class AsyncChatResourceWithRawResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithRawResponse: + return AsyncCompletionsResourceWithRawResponse(self._chat.completions) + + +class ChatResourceWithStreamingResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithStreamingResponse: + return CompletionsResourceWithStreamingResponse(self._chat.completions) + + +class AsyncChatResourceWithStreamingResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithStreamingResponse: + return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions) diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py new file mode 100644 index 00000000..a213bf05 --- /dev/null +++ b/src/gradientai/resources/agents/chat/completions.py @@ -0,0 +1,385 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.agents.chat import completion_create_params +from ....types.agents.chat.completion_create_response import CompletionCreateResponse + +__all__ = ["CompletionsResource", "AsyncCompletionsResource"] + + +class CompletionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return CompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return CompletionsResourceWithStreamingResponse(self) + + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. 
+ + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class AsyncCompletionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncCompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncCompletionsResourceWithStreamingResponse(self) + + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. 
+ + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class CompletionsResourceWithRawResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_raw_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithRawResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_raw_response_wrapper( + completions.create, + ) + + +class CompletionsResourceWithStreamingResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_streamed_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithStreamingResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_streamed_response_wrapper( + completions.create, + ) diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py new file mode 100644 index 00000000..9384ac14 --- /dev/null +++ b/src/gradientai/types/agents/chat/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py new file mode 100644 index 00000000..11d032ff --- /dev/null +++ b/src/gradientai/types/agents/chat/completion_create_params.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "CompletionCreateParams", + "Message", + "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestAssistantMessage", + "StreamOptions", +] + + +class CompletionCreateParams(TypedDict, total=False): + messages: Required[Iterable[Message]] + """A list of messages comprising the conversation so far.""" + + model: Required[str] + """Model ID used to generate the response.""" + + frequency_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on their existing frequency in the + text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + + logit_bias: Optional[Dict[str, int]] + """Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + """ + + logprobs: Optional[bool] + """Whether to return log probabilities of the output tokens or not. + + If true, returns the log probabilities of each output token returned in the + `content` of `message`. + """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + """ + + max_tokens: Optional[int] + """The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + n: Optional[int] + """How many chat completion choices to generate for each input message. + + Note that you will be charged based on the number of generated tokens across all + of the choices. Keep `n` as `1` to minimize costs. + """ + + presence_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on whether they appear in the text so + far, increasing the model's likelihood to talk about new topics. + """ + + stop: Union[Optional[str], List[str], None] + """Up to 4 sequences where the API will stop generating further tokens. + + The returned text will not contain the stop sequence. + """ + + stream: Optional[bool] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + """ + + stream_options: Optional[StreamOptions] + """Options for streaming response. 
Only set this when you set `stream: true`.""" + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + top_logprobs: Optional[int] + """ + An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + user: str + """ + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + """ + + +class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the system message.""" + + role: Required[Literal["system"]] + """The role of the messages author, in this case `system`.""" + + +class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the developer message.""" + + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" + + +class MessageChatCompletionRequestUserMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the user message.""" + + role: Required[Literal["user"]] + """The role of the messages author, in this case `user`.""" + + +class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): + role: Required[Literal["assistant"]] + """The role of the messages author, in this case `assistant`.""" + + content: Union[str, List[str], None] + """The contents of the assistant message.""" + + +Message: TypeAlias = Union[ + MessageChatCompletionRequestSystemMessage, + MessageChatCompletionRequestDeveloperMessage, + MessageChatCompletionRequestUserMessage, + MessageChatCompletionRequestAssistantMessage, +] + + +class StreamOptions(TypedDict, total=False): + include_usage: bool + """If set, an additional chunk will be streamed before the `data: [DONE]` message. + + The `usage` field on this chunk shows the token usage statistics for the entire + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. + """ diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py new file mode 100644 index 00000000..8c4968f8 --- /dev/null +++ b/src/gradientai/types/agents/chat/completion_create_response.py @@ -0,0 +1,81 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from ...chat.chat_completion_token_logprob import ChatCompletionTokenLogprob + +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] + + +class ChoiceLogprobs(BaseModel): + content: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message content tokens with log probability information.""" + + refusal: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + + +class ChoiceMessage(BaseModel): + content: Optional[str] = None + """The contents of the message.""" + + refusal: Optional[str] = None + """The refusal message generated by the model.""" + + role: Literal["assistant"] + """The role of the author of this message.""" + + +class Choice(BaseModel): + finish_reason: Literal["stop", "length"] + """The reason the model stopped generating tokens. + + This will be `stop` if the model hit a natural stop point or a provided stop + sequence, or `length` if the maximum number of tokens specified in the request + was reached. + """ + + index: int + """The index of the choice in the list of choices.""" + + logprobs: Optional[ChoiceLogprobs] = None + """Log probability information for the choice.""" + + message: ChoiceMessage + """A chat completion message generated by the model.""" + + +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + +class CompletionCreateResponse(BaseModel): + id: str + """A unique identifier for the chat completion.""" + + choices: List[Choice] + """A list of chat completion choices. + + Can be more than one if `n` is greater than 1. + """ + + created: int + """The Unix timestamp (in seconds) of when the chat completion was created.""" + + model: str + """The model used for the chat completion.""" + + object: Literal["chat.completion"] + """The object type, which is always `chat.completion`.""" + + usage: Optional[Usage] = None + """Usage statistics for the completion request.""" diff --git a/tests/api_resources/agents/chat/__init__.py b/tests/api_resources/agents/chat/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/chat/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py new file mode 100644 index 00000000..89d531a5 --- /dev/null +++ b/tests/api_resources/agents/chat/test_completions.py @@ -0,0 +1,186 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents.chat import CompletionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCompletions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + completion = client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + completion = client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncCompletions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, 
+ stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True From 8e8be7fa091a490d4afb3b0696241f5a5f5a5c36 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 22:19:55 +0000 Subject: [PATCH 085/200] feat(api): manual updates made chat_token log prob shared --- .stats.yml | 2 +- api.md | 6 +++--- src/gradientai/types/__init__.py | 2 +- .../types/agents/chat/completion_create_response.py | 2 +- src/gradientai/types/chat/__init__.py | 1 - src/gradientai/types/chat/completion_create_response.py | 2 +- src/gradientai/types/shared/__init__.py | 1 + .../types/{chat => shared}/chat_completion_token_logprob.py | 0 8 files changed, 8 insertions(+), 8 deletions(-) rename src/gradientai/types/{chat => shared}/chat_completion_token_logprob.py (100%) diff --git a/.stats.yml b/.stats.yml index 063cb477..4ac9b8e7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 77 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 7cc5a33eb381780e5290d4871c3b30df +config_hash: 94c621a4008714c6ab5f773670265bcc diff --git a/api.md b/api.md index dde6eaf2..1bf4737d 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,7 @@ # Shared Types ```python -from gradientai.types import APILinks, APIMeta +from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob ``` # Agents @@ -65,7 +65,7 @@ Methods: Types: ```python -from gradientai.types.agents.chat import ChatCompletionTokenLogprob, CompletionCreateResponse +from gradientai.types.agents.chat import CompletionCreateResponse ``` Methods: @@ -396,7 +396,7 @@ Methods: Types: ```python -from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse +from gradientai.types.chat import CompletionCreateResponse ``` Methods: diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 626c3840..d5673350 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,7 +3,7 @@ from __future__ import annotations from .model import Model as Model 
-from .shared import APIMeta as APIMeta, APILinks as APILinks +from .shared import APIMeta as APIMeta, APILinks as APILinks, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .api_agent import APIAgent as APIAgent from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py index 8c4968f8..f2860c31 100644 --- a/src/gradientai/types/agents/chat/completion_create_response.py +++ b/src/gradientai/types/agents/chat/completion_create_response.py @@ -4,7 +4,7 @@ from typing_extensions import Literal from ...._models import BaseModel -from ...chat.chat_completion_token_logprob import ChatCompletionTokenLogprob +from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py index 59553f68..9384ac14 100644 --- a/src/gradientai/types/chat/__init__.py +++ b/src/gradientai/types/chat/__init__.py @@ -4,4 +4,3 @@ from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse -from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 1ac59a28..1791373b 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,7 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel -from .chat_completion_token_logprob import ChatCompletionTokenLogprob +from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py index 5f02d62f..dc71bdd3 100644 --- a/src/gradientai/types/shared/__init__.py +++ b/src/gradientai/types/shared/__init__.py @@ -2,3 +2,4 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/chat/chat_completion_token_logprob.py b/src/gradientai/types/shared/chat_completion_token_logprob.py similarity index 100% rename from src/gradientai/types/chat/chat_completion_token_logprob.py rename to src/gradientai/types/shared/chat_completion_token_logprob.py From 7e1ba1f374481c21fd260840113a79808c6906cb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 22:59:10 +0000 Subject: [PATCH 086/200] feat(api): manual updates Move models to platform api --- .stats.yml | 4 +- api.md | 5 +- src/gradientai/resources/models.py | 182 +++++++++++--------- src/gradientai/types/__init__.py | 3 +- src/gradientai/types/api_model.py | 32 ++++ src/gradientai/types/model.py | 21 --- src/gradientai/types/model_list_params.py | 42 +++++ src/gradientai/types/model_list_response.py | 13 +- tests/api_resources/test_models.py | 100 ++--------- 9 files changed, 212 insertions(+), 190 deletions(-) create mode 100644 
src/gradientai/types/api_model.py delete mode 100644 src/gradientai/types/model.py create mode 100644 src/gradientai/types/model_list_params.py diff --git a/.stats.yml b/.stats.yml index 4ac9b8e7..79a36ab0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 77 +configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 94c621a4008714c6ab5f773670265bcc +config_hash: f0976fbc552ea878bb527447b5e663c9 diff --git a/api.md b/api.md index 1bf4737d..dc48f7b3 100644 --- a/api.md +++ b/api.md @@ -433,10 +433,9 @@ Methods: Types: ```python -from gradientai.types import APIAgreement, APIModel, APIModelVersion, Model, ModelListResponse +from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: -- client.models.retrieve(model) -> Model -- client.models.list() -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index da5462ae..c8e78b9b 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -2,9 +2,14 @@ from __future__ import annotations +from typing import List +from typing_extensions import Literal + import httpx +from ..types import model_list_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( @@ -13,7 +18,6 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..types.model import Model from .._base_client import make_request_options from ..types.model_list_response import ModelListResponse @@ -40,22 +44,52 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def retrieve( + def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. + To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -64,36 +98,24 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - - def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. - """ - return self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -119,22 +141,52 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def retrieve( + async def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. + To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: page number. 
+ + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -143,36 +195,24 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - - async def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. 
- """ - return await self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -182,9 +222,6 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) self.list = to_raw_response_wrapper( models.list, ) @@ -194,9 +231,6 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -206,9 +240,6 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) self.list = to_streamed_response_wrapper( models.list, ) @@ -218,9 +249,6 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index d5673350..4ec63b92 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,14 +2,15 @@ from __future__ import annotations -from .model import Model as Model from .shared import APIMeta as APIMeta, APILinks as APILinks, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .api_agent import APIAgent as APIAgent +from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion +from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py new file mode 100644 index 00000000..c2bc1edd --- /dev/null +++ b/src/gradientai/types/api_model.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIModel"] + + +class APIModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py deleted file mode 100644 index 2631ee8d..00000000 --- a/src/gradientai/types/model.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["Model"] - - -class Model(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/src/gradientai/types/model_list_params.py b/src/gradientai/types/model_list_params.py new file mode 100644 index 00000000..4abc1dc1 --- /dev/null +++ b/src/gradientai/types/model_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +__all__ = ["ModelListParams"] + + +class ModelListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" + + public_only: bool + """only include models that are publicly available.""" + + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + """include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + """ diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 8f835449..47651759 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,15 +1,18 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List -from typing_extensions import Literal +from typing import List, Optional -from .model import Model from .._models import BaseModel +from .api_model import APIModel +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks __all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - data: List[Model] + links: Optional[APILinks] = None - object: Literal["list"] + meta: Optional[APIMeta] = None + + models: Optional[List[APIModel]] = None diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index afee0c1f..5e119f71 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import Model, ModelListResponse +from gradientai.types import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,50 +19,19 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - model = client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) + def test_method_list(self, client: GradientAI) -> None: + model = client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.models.with_raw_response.retrieve( - "llama3-8b-instruct", + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - client.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @@ -95,50 +64,19 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.models.with_raw_response.retrieve( - "llama3-8b-instruct", + async 
def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - await async_client.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() From cd5277311736d07b991f15acd42e2d739996f6f8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 23:02:51 +0000 Subject: [PATCH 087/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4f9005ea..b5db7ce1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.6" + ".": "0.1.0-alpha.7" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index ae329889..74d4107a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index b8ef5fc0..d4e6dde6 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradientai" -__version__ = "0.1.0-alpha.6" # x-release-please-version +__version__ = "0.1.0-alpha.7" # x-release-please-version From 970bcb8a467c179b810219c9f902b2fa2105c2aa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 23:05:06 +0000 Subject: [PATCH 088/200] feat(client): setup streaming --- .stats.yml | 2 +- api.md | 2 +- src/gradientai/_client.py | 4 + src/gradientai/_streaming.py | 43 +- .../resources/agents/chat/completions.py | 535 +++++++++++++++++- src/gradientai/types/agents/chat/__init__.py | 1 + .../agents/chat/chat_completion_chunk.py | 93 +++ .../agents/chat/completion_create_params.py | 31 +- .../agents/chat/test_completions.py | 186 +++++- tests/test_client.py | 24 + 10 files changed, 893 insertions(+), 28 deletions(-) create mode 100644 src/gradientai/types/agents/chat/chat_completion_chunk.py diff --git a/.stats.yml b/.stats.yml index 79a36ab0..8eb4144d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: f0976fbc552ea878bb527447b5e663c9 +config_hash: e1b3d85ba9ae21d729a914c789422ba7 diff --git a/api.md b/api.md index dc48f7b3..b1ac8b43 100644 --- a/api.md +++ b/api.md @@ -65,7 +65,7 @@ Methods: Types: ```python -from gradientai.types.agents.chat import CompletionCreateResponse +from gradientai.types.agents.chat import ChatCompletionChunk, CompletionCreateResponse ``` Methods: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 327273c9..939d8c6f 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -117,6 +117,8 @@ def __init__( _strict_response_validation=_strict_response_validation, ) + self._default_stream_cls = Stream + @cached_property def agents(self) -> AgentsResource: from .resources.agents import AgentsResource @@ -355,6 +357,8 @@ def __init__( _strict_response_validation=_strict_response_validation, ) + self._default_stream_cls = AsyncStream + @cached_property def agents(self) -> AsyncAgentsResource: from .resources.agents import AsyncAgentsResource diff --git a/src/gradientai/_streaming.py b/src/gradientai/_streaming.py index bab5eb80..69a805ad 100644 --- a/src/gradientai/_streaming.py +++ b/src/gradientai/_streaming.py @@ -9,7 +9,8 @@ import httpx -from ._utils import extract_type_var_from_base +from ._utils import is_mapping, extract_type_var_from_base +from ._exceptions import APIError if TYPE_CHECKING: from ._client import GradientAI, AsyncGradientAI @@ -55,7 +56,25 @@ def __stream__(self) -> Iterator[_T]: iterator = self._iter_events() for sse in iterator: - yield process_data(data=sse.json(), cast_to=cast_to, response=response) + if sse.data.startswith("[DONE]"): + break + + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: @@ -119,7 +138,25 @@ async def __stream__(self) -> AsyncIterator[_T]: iterator = self._iter_events() 
async for sse in iterator: - yield process_data(data=sse.json(), cast_to=cast_to, response=response) + if sse.data.startswith("[DONE]"): + break + + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index a213bf05..92431cdf 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -3,11 +3,12 @@ from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform +from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import ( @@ -16,8 +17,10 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from ...._streaming import Stream, AsyncStream from ...._base_client import make_request_options from ....types.agents.chat import completion_create_params +from ....types.agents.chat.chat_completion_chunk import ChatCompletionChunk from ....types.agents.chat.completion_create_response import CompletionCreateResponse __all__ = ["CompletionsResource", "AsyncCompletionsResource"] @@ -43,6 +46,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ return CompletionsResourceWithStreamingResponse(self) + @overload def create( self, *, @@ -57,7 +61,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -153,6 +157,262 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ + ... 
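The three `create` overloads this commit introduces follow the standard `typing.overload` narrowing pattern: a `Literal[False]` default maps to `CompletionCreateResponse`, `Literal[True]` maps to `Stream[ChatCompletionChunk]`, and a plain `bool` covers callers whose flag is only known at runtime. Below is a minimal, self-contained sketch of that pattern; the toy `create` and its `str`/`Iterator[str]` return types are illustrative stand-ins, not the SDK's actual signatures.

```python
from typing import Iterator, Literal, Union, overload


@overload
def create(*, stream: Literal[False] = False) -> str: ...
@overload
def create(*, stream: Literal[True]) -> Iterator[str]: ...
@overload
def create(*, stream: bool) -> Union[str, Iterator[str]]: ...
def create(*, stream: bool = False) -> Union[str, Iterator[str]]:
    # Stand-ins for CompletionCreateResponse and Stream[ChatCompletionChunk].
    return iter(["hello", " world"]) if stream else "hello world"


full = create()              # a type checker infers: str
parts = create(stream=True)  # a type checker infers: Iterator[str]
```

With this shape, `create()` type-checks as the non-streaming return, `create(stream=True)` as the streaming one, and `create(stream=some_bool)` falls through to the union.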
+ + @overload + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: Literal[True], + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: bool, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
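
The three overloads follow the usual `Literal[False]` / `Literal[True]` / `bool` progression so a type checker can narrow the return type from the value of `stream`. A self-contained sketch of the same pattern (simplified: the real signatures also accept `NOT_GIVEN` and many more parameters):

```python
from typing import Iterator, Literal, Union, overload


@overload
def create(*, stream: Literal[False] = ...) -> str: ...
@overload
def create(*, stream: Literal[True]) -> Iterator[str]: ...
@overload
def create(*, stream: bool) -> Union[str, Iterator[str]]: ...
def create(*, stream: bool = False) -> Union[str, Iterator[str]]:
    # A single runtime implementation backs all three typed signatures.
    if stream:
        return iter(["chunk-1 ", "chunk-2"])  # stands in for Stream[ChatCompletionChunk]
    return "full response"  # stands in for CompletionCreateResponse


full: str = create()  # checker selects the Literal[False] overload
chunks: Iterator[str] = create(stream=True)  # checker selects the Literal[True] overload
```
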
+ + @required_args(["messages", "model"], ["messages", "model", "stream"]) + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( "/chat/completions" if self._client._base_url_overridden @@ -177,12 +437,16 @@ def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CompletionCreateResponse, + stream=stream or False, + stream_cls=Stream[ChatCompletionChunk], ) @@ -206,6 +470,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ return AsyncCompletionsResourceWithStreamingResponse(self) + @overload async def create( self, *, @@ -220,7 +485,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -316,6 +581,262 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ + ... 
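
The implementation earlier in this hunk does the real work: the request body is validated against `CompletionCreateParamsStreaming` (where `stream` is `Required[Literal[True]]`, see the params diff below) or `CompletionCreateParamsNonStreaming`, and `stream=stream or False` collapses the tri-state `NOT_GIVEN` / `None` / `bool` flag into a plain bool before it reaches `_post`. A sketch of that coercion, assuming `NotGiven` is falsy as in this SDK's `_types.py`:

```python
class NotGiven:
    def __bool__(self) -> bool:
        return False


NOT_GIVEN = NotGiven()

# Mirrors `stream=stream or False` in the `_post(...)` call above:
for value in (NOT_GIVEN, None, False, True):
    coerced = value or False
    print(coerced)  # False, False, False, True — only an explicit True streams
```
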
+ + @overload + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: Literal[True], + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: bool, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
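
The async overloads mirror the sync ones exactly; awaiting `create(..., stream=True)` returns an `AsyncStream` consumed with `async for`. A sketch (client construction and model name are illustrative):

```python
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()
    stream = await client.agents.chat.completions.create(
        messages=[{"role": "user", "content": "Hello"}],
        model="llama3-8b-instruct",
        stream=True,
    )
    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="", flush=True)


asyncio.run(main())
```
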
+ + @required_args(["messages", "model"], ["messages", "model", "stream"]) + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions" if self._client._base_url_overridden @@ -340,12 +861,16 @@ async def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CompletionCreateResponse, + stream=stream or False, + stream_cls=AsyncStream[ChatCompletionChunk], ) diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py index 9384ac14..f0243162 100644 --- a/src/gradientai/types/agents/chat/__init__.py +++ b/src/gradientai/types/agents/chat/__init__.py @@ -2,5 +2,6 @@ from __future__ import annotations +from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/agents/chat/chat_completion_chunk.py b/src/gradientai/types/agents/chat/chat_completion_chunk.py new file mode 100644 index 00000000..b81aef72 --- /dev/null +++ b/src/gradientai/types/agents/chat/chat_completion_chunk.py @@ -0,0 +1,93 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "user", "assistant"]] = None
+    """The role of the author of this message."""
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, or `length` if the maximum number of tokens specified in the request
+    was reached.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the generated completion."""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt."""
+
+    total_tokens: int
+    """Total number of tokens used in the request (prompt + completion)."""
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one element if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model used to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    usage: Optional[Usage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
+ """ diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py index 11d032ff..ec5c6b70 100644 --- a/src/gradientai/types/agents/chat/completion_create_params.py +++ b/src/gradientai/types/agents/chat/completion_create_params.py @@ -6,17 +6,19 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ - "CompletionCreateParams", + "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", "StreamOptions", + "CompletionCreateParamsNonStreaming", + "CompletionCreateParamsStreaming", ] -class CompletionCreateParams(TypedDict, total=False): +class CompletionCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[Message]] """A list of messages comprising the conversation so far.""" @@ -92,12 +94,6 @@ class CompletionCreateParams(TypedDict, total=False): The returned text will not contain the stop sequence. """ - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - """ - stream_options: Optional[StreamOptions] """Options for streaming response. Only set this when you set `stream: true`.""" @@ -183,3 +179,22 @@ class StreamOptions(TypedDict, total=False): **NOTE:** If the stream is interrupted, you may not receive the final usage chunk which contains the total token usage for the request. """ + + +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + """ + + +class CompletionCreateParamsStreaming(CompletionCreateParamsBase): + stream: Required[Literal[True]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. 
+ """ + + +CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming] diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 89d531a5..4630adfc 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -19,7 +19,7 @@ class TestCompletions: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: GradientAI) -> None: completion = client.agents.chat.completions.create( messages=[ { @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: completion = client.agents.chat.completions.create( messages=[ { @@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: n=1, presence_penalty=-2, stop="\n", - stream=True, + stream=False, stream_options={"include_usage": True}, temperature=1, top_logprobs=0, @@ -62,7 +62,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: response = client.agents.chat.completions.with_raw_response.create( messages=[ { @@ -80,7 +80,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: with client.agents.chat.completions.with_streaming_response.create( messages=[ { @@ -98,6 +98,89 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + completion_stream = client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + completion_stream.response.close() + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + completion_stream = client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + completion_stream.response.close() + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.agents.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> 
None: + with client.agents.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + class TestAsyncCompletions: parametrize = pytest.mark.parametrize( @@ -106,7 +189,7 @@ class TestAsyncCompletions: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.agents.chat.completions.create( messages=[ { @@ -120,7 +203,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.agents.chat.completions.create( messages=[ { @@ -138,7 +221,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI n=1, presence_penalty=-2, stop="\n", - stream=True, + stream=False, stream_options={"include_usage": True}, temperature=1, top_logprobs=0, @@ -149,7 +232,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( messages=[ { @@ -167,7 +250,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( messages=[ { @@ -184,3 +267,86 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert_matches_type(CompletionCreateResponse, completion, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + completion_stream = await async_client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + await completion_stream.response.aclose() + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + completion_stream = await async_client.agents.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + await completion_stream.response.aclose() + + @pytest.mark.skip() + 
@parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = await response.parse() + await stream.close() + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True diff --git a/tests/test_client.py b/tests/test_client.py index fc2c1325..137fabed 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -24,6 +24,7 @@ from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError from gradientai._types import Omit from gradientai._models import BaseModel, FinalRequestOptions +from gradientai._streaming import Stream, AsyncStream from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from gradientai._base_client import ( DEFAULT_TIMEOUT, @@ -751,6 +752,17 @@ def test_client_max_retries_validation(self) -> None: max_retries=cast(Any, None), ) + @pytest.mark.respx(base_url=base_url) + def test_default_stream_cls(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + stream = self.client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]) + assert isinstance(stream, Stream) + stream.response.close() + @pytest.mark.respx(base_url=base_url) def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): @@ -1650,6 +1662,18 @@ async def test_client_max_retries_validation(self) -> None: max_retries=cast(Any, None), ) + @pytest.mark.respx(base_url=base_url) + @pytest.mark.asyncio + async def test_default_stream_cls(self, respx_mock: MockRouter) -> None: + class Model(BaseModel): + name: str + + respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + + stream = await self.client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]) + assert isinstance(stream, AsyncStream) + await stream.response.aclose() + @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: From e0aeefc13e27edff911da3107d6e96d209b7ee3f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 23:46:06 +0000 Subject: [PATCH 089/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b5db7ce1..c373724d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.7" + ".": "0.1.0-alpha.8" } \ No newline at end of 
file diff --git a/pyproject.toml b/pyproject.toml index 74d4107a..f38bdb85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.7" +version = "0.1.0-alpha.8" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index d4e6dde6..8c8f2b63 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradientai" -__version__ = "0.1.0-alpha.7" # x-release-please-version +__version__ = "0.1.0-alpha.8" # x-release-please-version From 887fa2b2509b929df6fdb5a96924a1bf434b3249 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 00:47:49 +0000 Subject: [PATCH 090/200] feat(api): manual updates add streaming to chat completions --- .stats.yml | 2 +- api.md | 4 +- .../resources/agents/chat/completions.py | 18 +- src/gradientai/resources/chat/completions.py | 535 +++++++++++++++++- src/gradientai/types/agents/chat/__init__.py | 2 +- .../chat/agent_chat_completion_chunk.py | 93 +++ src/gradientai/types/chat/__init__.py | 1 + .../chat/chat_completion_chunk.py | 4 +- .../types/chat/completion_create_params.py | 31 +- tests/api_resources/chat/test_completions.py | 186 +++++- 10 files changed, 838 insertions(+), 38 deletions(-) create mode 100644 src/gradientai/types/agents/chat/agent_chat_completion_chunk.py rename src/gradientai/types/{agents => }/chat/chat_completion_chunk.py (96%) diff --git a/.stats.yml b/.stats.yml index 8eb4144d..fcc630a7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: e1b3d85ba9ae21d729a914c789422ba7 +config_hash: 0bc3af28d4abd9be8bcc81f615bc832d diff --git a/api.md b/api.md index b1ac8b43..52551f1f 100644 --- a/api.md +++ b/api.md @@ -65,7 +65,7 @@ Methods: Types: ```python -from gradientai.types.agents.chat import ChatCompletionChunk, CompletionCreateResponse +from gradientai.types.agents.chat import AgentChatCompletionChunk, CompletionCreateResponse ``` Methods: @@ -396,7 +396,7 @@ Methods: Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse ``` Methods: diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 92431cdf..7acba243 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -20,8 +20,8 @@ from ...._streaming import Stream, AsyncStream from ...._base_client import make_request_options from ....types.agents.chat import completion_create_params -from ....types.agents.chat.chat_completion_chunk import ChatCompletionChunk from ....types.agents.chat.completion_create_response import CompletionCreateResponse +from ....types.agents.chat.agent_chat_completion_chunk import AgentChatCompletionChunk __all__ = ["CompletionsResource", "AsyncCompletionsResource"] @@ -186,7 +186,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = 
None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Stream[ChatCompletionChunk]: + ) -> Stream[AgentChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -299,7 +299,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: + ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -412,7 +412,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: + ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]: return self._post( "/chat/completions" if self._client._base_url_overridden @@ -446,7 +446,7 @@ def create( ), cast_to=CompletionCreateResponse, stream=stream or False, - stream_cls=Stream[ChatCompletionChunk], + stream_cls=Stream[AgentChatCompletionChunk], ) @@ -610,7 +610,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncStream[ChatCompletionChunk]: + ) -> AsyncStream[AgentChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -723,7 +723,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: + ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
@@ -836,7 +836,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: + ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]: return await self._post( "/chat/completions" if self._client._base_url_overridden @@ -870,7 +870,7 @@ async def create( ), cast_to=CompletionCreateResponse, stream=stream or False, - stream_cls=AsyncStream[ChatCompletionChunk], + stream_cls=AsyncStream[AgentChatCompletionChunk], ) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index 2d7c94c3..79f37901 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -3,11 +3,12 @@ from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform +from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -16,8 +17,10 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from ..._streaming import Stream, AsyncStream from ...types.chat import completion_create_params from ..._base_client import make_request_options +from ...types.chat.chat_completion_chunk import ChatCompletionChunk from ...types.chat.completion_create_response import CompletionCreateResponse __all__ = ["CompletionsResource", "AsyncCompletionsResource"] @@ -43,6 +46,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ return CompletionsResourceWithStreamingResponse(self) + @overload def create( self, *, @@ -57,7 +61,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -153,6 +157,262 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ + ... 
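
This follow-up patch applies the same overload treatment to the top-level `chat.completions` resource, after renaming the agent variant's chunk type to `AgentChatCompletionChunk`. With both patches applied, the two streaming surfaces would look like this (sketch; message and model are illustrative):

```python
from gradientai import GradientAI

client = GradientAI()
messages = [{"role": "user", "content": "Hello"}]

# Agent endpoint: chunks are typed as AgentChatCompletionChunk.
agent_stream = client.agents.chat.completions.create(
    messages=messages, model="llama3-8b-instruct", stream=True
)

# Top-level endpoint: chunks are typed as ChatCompletionChunk.
plain_stream = client.chat.completions.create(
    messages=messages, model="llama3-8b-instruct", stream=True
)
```
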
+ + @overload + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: Literal[True], + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: bool, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
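
As the `ChatCompletionChunk.usage` docstring earlier in this series notes, setting `stream_options={"include_usage": True}` makes the final chunk (and only the final chunk) carry token counts, with a possibly empty `choices` list. A sketch of reading it, reusing the `client` from the previous sketch:

```python
stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Hi"}],
    model="llama3-8b-instruct",
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    if chunk.usage is not None:
        # Only the final chunk carries usage; its `choices` list may be empty.
        print("prompt:", chunk.usage.prompt_tokens,
              "completion:", chunk.usage.completion_tokens,
              "total:", chunk.usage.total_tokens)
```
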
+ + @required_args(["messages", "model"], ["messages", "model", "stream"]) + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( "/chat/completions" if self._client._base_url_overridden @@ -177,12 +437,16 @@ def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CompletionCreateResponse, + stream=stream or False, + stream_cls=Stream[ChatCompletionChunk], ) @@ -206,6 +470,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ return AsyncCompletionsResourceWithStreamingResponse(self) + @overload async def create( self, *, @@ -220,7 +485,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -316,6 +581,262 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ + ... 
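When `stream` is only known at runtime, the `stream: bool` overload applies and the caller gets the union `CompletionCreateResponse | Stream[ChatCompletionChunk]` back, so the result has to be narrowed before use. A hedged usage sketch, assuming `CompletionCreateResponse` (exported from `gradientai.types.chat` per this SDK's `api.md`) supports `isinstance` checks; the model ID is a placeholder:

```python
from gradientai import GradientAI
from gradientai.types.chat import CompletionCreateResponse

client = GradientAI()


def ask(question: str, *, stream: bool) -> None:
    result = client.chat.completions.create(
        messages=[{"role": "user", "content": question}],
        model="llama3-8b-instruct",  # placeholder model ID
        stream=stream,
    )
    if isinstance(result, CompletionCreateResponse):
        # Non-streaming path: one fully parsed response.
        print(result.choices)
    else:
        # Streaming path: iterate over Stream[ChatCompletionChunk].
        for chunk in result:
            print(chunk.choices)
```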
+ + @overload + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: Literal[True], + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. 
Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + stream: bool, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
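The async overloads mirror the sync ones, returning `AsyncStream[ChatCompletionChunk]` when `stream=True`. A sketch of consuming that stream together with `stream_options={"include_usage": True}`, whose documented behaviour is an empty `choices` list plus aggregate token usage on the final chunk; the model ID is again a placeholder:

```python
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()
    stream = await client.chat.completions.create(
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        model="llama3-8b-instruct",  # placeholder model ID
        stream=True,
        stream_options={"include_usage": True},
    )
    async for chunk in stream:
        if chunk.choices:
            delta = chunk.choices[0].delta
            if delta.content:
                print(delta.content, end="", flush=True)
        elif chunk.usage is not None:
            # Final chunk under include_usage: no choices, only usage totals.
            print(f"\n[{chunk.usage.total_tokens} tokens]")


asyncio.run(main())
```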
+ + @required_args(["messages", "model"], ["messages", "model", "stream"]) + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions" if self._client._base_url_overridden @@ -340,12 +861,16 @@ async def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParams, + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=CompletionCreateResponse, + stream=stream or False, + stream_cls=AsyncStream[ChatCompletionChunk], ) diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py index f0243162..305ba0af 100644 --- a/src/gradientai/types/agents/chat/__init__.py +++ b/src/gradientai/types/agents/chat/__init__.py @@ -2,6 +2,6 @@ from __future__ import annotations -from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse +from .agent_chat_completion_chunk import AgentChatCompletionChunk as AgentChatCompletionChunk diff --git a/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py b/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py new file mode 100644 index 00000000..36ee3d9e --- /dev/null +++ b/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py @@ -0,0 +1,93 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["AgentChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "user", "assistant"]] = None
+    """The role of the author of this message."""
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, or `length` if the maximum number of tokens specified in the request
+    was reached.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class Usage(BaseModel):
+    completion_tokens: int
+    """Number of tokens in the generated completion."""
+
+    prompt_tokens: int
+    """Number of tokens in the prompt."""
+
+    total_tokens: int
+    """Total number of tokens used in the request (prompt + completion)."""
+
+
+class AgentChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one element if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model used to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    usage: Optional[Usage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
+ """ diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py index 9384ac14..f0243162 100644 --- a/src/gradientai/types/chat/__init__.py +++ b/src/gradientai/types/chat/__init__.py @@ -2,5 +2,6 @@ from __future__ import annotations +from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/agents/chat/chat_completion_chunk.py b/src/gradientai/types/chat/chat_completion_chunk.py similarity index 96% rename from src/gradientai/types/agents/chat/chat_completion_chunk.py rename to src/gradientai/types/chat/chat_completion_chunk.py index b81aef72..4adcc63d 100644 --- a/src/gradientai/types/agents/chat/chat_completion_chunk.py +++ b/src/gradientai/types/chat/chat_completion_chunk.py @@ -3,8 +3,8 @@ from typing import List, Optional from typing_extensions import Literal -from ...._models import BaseModel -from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob +from ..._models import BaseModel +from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py index 11d032ff..ec5c6b70 100644 --- a/src/gradientai/types/chat/completion_create_params.py +++ b/src/gradientai/types/chat/completion_create_params.py @@ -6,17 +6,19 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ - "CompletionCreateParams", + "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", "StreamOptions", + "CompletionCreateParamsNonStreaming", + "CompletionCreateParamsStreaming", ] -class CompletionCreateParams(TypedDict, total=False): +class CompletionCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[Message]] """A list of messages comprising the conversation so far.""" @@ -92,12 +94,6 @@ class CompletionCreateParams(TypedDict, total=False): The returned text will not contain the stop sequence. """ - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - """ - stream_options: Optional[StreamOptions] """Options for streaming response. Only set this when you set `stream: true`.""" @@ -183,3 +179,22 @@ class StreamOptions(TypedDict, total=False): **NOTE:** If the stream is interrupted, you may not receive the final usage chunk which contains the total token usage for the request. """ + + +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + """ + + +class CompletionCreateParamsStreaming(CompletionCreateParamsBase): + stream: Required[Literal[True]] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. 
+ """ + + +CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming] diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index b4c09579..25b8419a 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -19,7 +19,7 @@ class TestCompletions: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: GradientAI) -> None: completion = client.chat.completions.create( messages=[ { @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: completion = client.chat.completions.create( messages=[ { @@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: n=1, presence_penalty=-2, stop="\n", - stream=True, + stream=False, stream_options={"include_usage": True}, temperature=1, top_logprobs=0, @@ -62,7 +62,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { @@ -80,7 +80,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { @@ -98,6 +98,89 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + completion_stream = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + completion_stream.response.close() + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + completion_stream = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + completion_stream.response.close() + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.chat.completions.with_streaming_response.create( + messages=[ 
+ { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + class TestAsyncCompletions: parametrize = pytest.mark.parametrize( @@ -106,7 +189,7 @@ class TestAsyncCompletions: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.chat.completions.create( messages=[ { @@ -120,7 +203,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.chat.completions.create( messages=[ { @@ -138,7 +221,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI n=1, presence_penalty=-2, stop="\n", - stream=True, + stream=False, stream_options={"include_usage": True}, temperature=1, top_logprobs=0, @@ -149,7 +232,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: response = await async_client.chat.completions.with_raw_response.create( messages=[ { @@ -167,7 +250,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: async with async_client.chat.completions.with_streaming_response.create( messages=[ { @@ -184,3 +267,86 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert_matches_type(CompletionCreateResponse, completion, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + completion_stream = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + await completion_stream.response.aclose() + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + completion_stream = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + await completion_stream.response.aclose() + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = await response.parse() + await stream.close() + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True From 17ff9be4bdfd6a3a3e8a35d5e004eae2ade92eda Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 01:00:39 +0000 Subject: [PATCH 091/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c373724d..46b9b6b2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.8" + ".": "0.1.0-alpha.9" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index f38bdb85..47f201c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.8" +version = "0.1.0-alpha.9" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index 8c8f2b63..5cd8ca49 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradientai" -__version__ = "0.1.0-alpha.8" # x-release-please-version +__version__ = "0.1.0-alpha.9" # x-release-please-version From f585aed1de133e10104fa11cdca883e1b33a4a6a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 01:34:06 +0000 Subject: [PATCH 092/200] feat(api): manual updates --- .stats.yml | 2 +- README.md | 32 ++++++------- api.md | 28 +++++------ src/gradientai/_client.py | 72 ++++++++++++++-------------- src/gradientai/resources/__init__.py | 12 ++--- 5 files changed, 73 insertions(+), 73 deletions(-) diff --git a/.stats.yml b/.stats.yml index fcc630a7..ae5cfdf1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 0bc3af28d4abd9be8bcc81f615bc832d +config_hash: 651ae9b93d723d383facbf979fd97fee diff --git a/README.md b/README.md index 5c9ac183..f0d4ffa9 100644 --- a/README.md +++ b/README.md @@ -31,16 +31,16 @@ client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) -completion = client.agents.chat.completions.create( +completion = client.chat.completions.create( messages=[ { - "content": "string", - "role": "system", + "role": "user", + "content": "What is the capital of France?", } ], - model="llama3-8b-instruct", + model="llama3.3-70b-instruct", ) -print(completion.id) +print(completion.choices) ``` While you can provide an `api_key` keyword argument, @@ -63,16 +63,16 @@ client = AsyncGradientAI( async def main() -> None: - completion = await client.agents.chat.completions.create( + completion = await client.chat.completions.create( messages=[ { - "content": "string", - "role": "system", + "role": "user", + "content": "What is the capital of France?", } ], - model="llama3-8b-instruct", + model="llama3.3-70b-instruct", ) - print(completion.id) + print(completion.choices) asyncio.run(main()) @@ -105,16 +105,16 @@ async def main() -> None: api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: - completion = await client.agents.chat.completions.create( + completion = await client.chat.completions.create( messages=[ { - "content": "string", - "role": "system", + "role": "user", + "content": "What is the capital of France?", } ], - model="llama3-8b-instruct", + model="llama3.3-70b-instruct", ) - print(completion.id) + print(completion.choices) asyncio.run(main()) @@ -138,7 +138,7 @@ from gradientai import GradientAI client = GradientAI() -completion = client.agents.chat.completions.create( +completion = client.chat.completions.create( messages=[ { "content": "string", diff --git a/api.md b/api.md index 52551f1f..8cecd207 100644 --- a/api.md +++ b/api.md @@ -4,6 +4,20 @@ from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob ``` +# Chat + +## Completions + +Types: + +```python +from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse +``` + +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + # Agents Types: @@ -389,20 +403,6 @@ Methods: - client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse - 
client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse -# Chat - -## Completions - -Types: - -```python -from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse -``` - -Methods: - -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse - # Inference ## APIKeys diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 939d8c6f..6e5d3d8e 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -119,6 +119,12 @@ def __init__( self._default_stream_cls = Stream + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + @cached_property def agents(self) -> AgentsResource: from .resources.agents import AgentsResource @@ -143,12 +149,6 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self) - @cached_property - def chat(self) -> ChatResource: - from .resources.chat import ChatResource - - return ChatResource(self) - @cached_property def inference(self) -> InferenceResource: from .resources.inference import InferenceResource @@ -359,6 +359,12 @@ def __init__( self._default_stream_cls = AsyncStream + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + @cached_property def agents(self) -> AsyncAgentsResource: from .resources.agents import AsyncAgentsResource @@ -383,12 +389,6 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self) - @cached_property - def chat(self) -> AsyncChatResource: - from .resources.chat import AsyncChatResource - - return AsyncChatResource(self) - @cached_property def inference(self) -> AsyncInferenceResource: from .resources.inference import AsyncInferenceResource @@ -539,6 +539,12 @@ class GradientAIWithRawResponse: def __init__(self, client: GradientAI) -> None: self._client = client + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + @cached_property def agents(self) -> agents.AgentsResourceWithRawResponse: from .resources.agents import AgentsResourceWithRawResponse @@ -563,12 +569,6 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def chat(self) -> chat.ChatResourceWithRawResponse: - from .resources.chat import ChatResourceWithRawResponse - - return ChatResourceWithRawResponse(self._client.chat) - @cached_property def inference(self) -> inference.InferenceResourceWithRawResponse: from .resources.inference import InferenceResourceWithRawResponse @@ -588,6 +588,12 @@ class AsyncGradientAIWithRawResponse: def __init__(self, client: AsyncGradientAI) -> None: self._client = client + @cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: from .resources.agents import AsyncAgentsResourceWithRawResponse @@ -612,12 +618,6 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def chat(self) -> 
chat.AsyncChatResourceWithRawResponse: - from .resources.chat import AsyncChatResourceWithRawResponse - - return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property def inference(self) -> inference.AsyncInferenceResourceWithRawResponse: from .resources.inference import AsyncInferenceResourceWithRawResponse @@ -637,6 +637,12 @@ class GradientAIWithStreamedResponse: def __init__(self, client: GradientAI) -> None: self._client = client + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property def agents(self) -> agents.AgentsResourceWithStreamingResponse: from .resources.agents import AgentsResourceWithStreamingResponse @@ -661,12 +667,6 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def chat(self) -> chat.ChatResourceWithStreamingResponse: - from .resources.chat import ChatResourceWithStreamingResponse - - return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property def inference(self) -> inference.InferenceResourceWithStreamingResponse: from .resources.inference import InferenceResourceWithStreamingResponse @@ -686,6 +686,12 @@ class AsyncGradientAIWithStreamedResponse: def __init__(self, client: AsyncGradientAI) -> None: self._client = client + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: from .resources.agents import AsyncAgentsResourceWithStreamingResponse @@ -710,12 +716,6 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: - from .resources.chat import AsyncChatResourceWithStreamingResponse - - return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: from .resources.inference import AsyncInferenceResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 785bf1ac..33bfffff 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -58,6 +58,12 @@ ) __all__ = [ + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", @@ -82,12 +88,6 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", "InferenceResource", "AsyncInferenceResource", "InferenceResourceWithRawResponse", From 327cbe9be8607b22618d8eb751049e4cdb6f403f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Jun 2025 01:49:55 +0000
Subject: [PATCH 093/200] feat(api): manual updates

force readme update

---
 .stats.yml           |   2 +-
 README.md            |  96 ++++++++++++++++++++++++++++-----
 tests/test_client.py | 126 ++++++++++++++++++++++++++++++++++---------
 3 files changed, 186 insertions(+), 38 deletions(-)

diff --git a/.stats.yml b/.stats.yml
index ae5cfdf1..9c50e58e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 76
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
 openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 651ae9b93d723d383facbf979fd97fee
+config_hash: 67ce33bbbf8698b50194d8da5fd009d6
diff --git a/README.md b/README.md
index f0d4ffa9..a11725da 100644
--- a/README.md
+++ b/README.md
@@ -120,6 +120,50 @@ async def main() -> None:
 asyncio.run(main())
 ```
 
+## Streaming responses
+
+We provide support for streaming responses using Server-Sent Events (SSE).
+
+```python
+from gradientai import GradientAI
+
+client = GradientAI()
+
+stream = client.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
+    stream=True,
+)
+for completion in stream:
+    print(completion.choices)
+```
+
+The async client uses the exact same interface.
+
+```python
+from gradientai import AsyncGradientAI
+
+client = AsyncGradientAI()
+
+stream = await client.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
+    stream=True,
+)
+async for completion in stream:
+    print(completion.choices)
+```
+
 ## Using types
 
 Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict).
Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: @@ -167,8 +211,14 @@ from gradientai import GradientAI client = GradientAI() try: - client.agents.versions.list( - uuid="REPLACE_ME", + client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "What is the capital of France?", + } + ], + model="llama3.3-70b-instruct", ) except gradientai.APIConnectionError as e: print("The server could not be reached") @@ -212,8 +262,14 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).agents.versions.list( - uuid="REPLACE_ME", +client.with_options(max_retries=5).chat.completions.create( + messages=[ + { + "role": "user", + "content": "What is the capital of France?", + } + ], + model="llama3.3-70b-instruct", ) ``` @@ -237,8 +293,14 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).agents.versions.list( - uuid="REPLACE_ME", +client.with_options(timeout=5.0).chat.completions.create( + messages=[ + { + "role": "user", + "content": "What is the capital of France?", + } + ], + model="llama3.3-70b-instruct", ) ``` @@ -280,13 +342,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.agents.versions.with_raw_response.list( - uuid="REPLACE_ME", +response = client.chat.completions.with_raw_response.create( + messages=[{ + "role": "user", + "content": "What is the capital of France?", + }], + model="llama3.3-70b-instruct", ) print(response.headers.get('X-My-Header')) -version = response.parse() # get the object that `agents.versions.list()` would have returned -print(version.agent_versions) +completion = response.parse() # get the object that `chat.completions.create()` would have returned +print(completion.choices) ``` These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. @@ -300,8 +366,14 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
```python -with client.agents.versions.with_streaming_response.list( - uuid="REPLACE_ME", +with client.chat.completions.with_streaming_response.create( + messages=[ + { + "role": "user", + "content": "What is the capital of France?", + } + ], + model="llama3.3-70b-instruct", ) as response: print(response.headers.get("X-My-Header")) diff --git a/tests/test_client.py b/tests/test_client.py index 137fabed..5fe40b1e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -819,20 +819,36 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ).__enter__() assert _get_open_connections(self.client) == 0 @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) + respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ).__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -859,9 +875,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list(uuid="uuid") + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -883,10 +907,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list( - uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": Omit()}, ) assert 
len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -908,10 +939,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list( - uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": "42"}, ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" @@ -1734,10 +1772,18 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI ) -> None: - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ).__aenter__() assert _get_open_connections(self.client) == 0 @@ -1746,10 +1792,18 @@ async def test_retrying_timeout_errors_doesnt_leak( async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI ) -> None: - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) + respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ).__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1777,9 +1831,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list(uuid="uuid") + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1802,10 +1864,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list( - uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} + response = await 
client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": Omit()}, ) assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @@ -1828,10 +1897,17 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: return httpx.Response(500) return httpx.Response(200) - respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - - response = await client.agents.versions.with_raw_response.list( - uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} + respx_mock.post("/chat/completions").mock(side_effect=retry_handler) + + response = await client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + extra_headers={"x-stainless-retry-count": "42"}, ) assert response.http_request.headers.get("x-stainless-retry-count") == "42" From d58748910aa3426d56fbb0c8e8b8ac12645b75d2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 01:56:22 +0000 Subject: [PATCH 094/200] feat(api): manual updates change order --- .stats.yml | 2 +- README.md | 24 +++++----- api.md | 28 +++++------ src/gradientai/_client.py | 72 ++++++++++++++-------------- src/gradientai/resources/__init__.py | 12 ++--- tests/test_client.py | 20 ++++---- 6 files changed, 79 insertions(+), 79 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9c50e58e..ce83998c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 67ce33bbbf8698b50194d8da5fd009d6 +config_hash: 9b44ce3fd39c43f2001bc11934e6b1b0 diff --git a/README.md b/README.md index a11725da..f5fc7b7d 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) -completion = client.chat.completions.create( +completion = client.agents.chat.completions.create( messages=[ { "role": "user", @@ -63,7 +63,7 @@ client = AsyncGradientAI( async def main() -> None: - completion = await client.chat.completions.create( + completion = await client.agents.chat.completions.create( messages=[ { "role": "user", @@ -105,7 +105,7 @@ async def main() -> None: api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: - completion = await client.chat.completions.create( + completion = await client.agents.chat.completions.create( messages=[ { "role": "user", @@ -129,7 +129,7 @@ from gradientai import GradientAI client = GradientAI() -stream = client.chat.completions.create( +stream = client.agents.chat.completions.create( messages=[ { "role": "user", @@ -150,7 +150,7 @@ from gradientai import AsyncGradientAI client = AsyncGradientAI() -stream = await client.chat.completions.create( +stream = await client.agents.chat.completions.create( messages=[ { "role": "user", @@ -182,7 +182,7 @@ from gradientai import GradientAI client = GradientAI() -completion = client.chat.completions.create( +completion = client.agents.chat.completions.create( messages=[ { "content": "string", @@ -211,7 +211,7 @@ from gradientai import 
GradientAI client = GradientAI() try: - client.chat.completions.create( + client.agents.chat.completions.create( messages=[ { "role": "user", @@ -262,7 +262,7 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).chat.completions.create( +client.with_options(max_retries=5).agents.chat.completions.create( messages=[ { "role": "user", @@ -293,7 +293,7 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).chat.completions.create( +client.with_options(timeout=5.0).agents.chat.completions.create( messages=[ { "role": "user", @@ -342,7 +342,7 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.chat.completions.with_raw_response.create( +response = client.agents.chat.completions.with_raw_response.create( messages=[{ "role": "user", "content": "What is the capital of France?", @@ -351,7 +351,7 @@ response = client.chat.completions.with_raw_response.create( ) print(response.headers.get('X-My-Header')) -completion = response.parse() # get the object that `chat.completions.create()` would have returned +completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned print(completion.choices) ``` @@ -366,7 +366,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. ```python -with client.chat.completions.with_streaming_response.create( +with client.agents.chat.completions.with_streaming_response.create( messages=[ { "role": "user", diff --git a/api.md b/api.md index 8cecd207..fc8d20eb 100644 --- a/api.md +++ b/api.md @@ -4,20 +4,6 @@ from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob ``` -# Chat - -## Completions - -Types: - -```python -from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse -``` - -Methods: - -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse - # Agents Types: @@ -267,6 +253,20 @@ Methods: - client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse - client.agents.routes.view(uuid) -> RouteViewResponse +# Chat + +## Completions + +Types: + +```python +from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse +``` + +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + # ModelProviders ## Anthropic diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 6e5d3d8e..c9fe6733 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -119,18 +119,18 @@ def __init__( self._default_stream_cls = Stream - @cached_property - def chat(self) -> ChatResource: - from .resources.chat import ChatResource - - return ChatResource(self) - @cached_property def agents(self) -> AgentsResource: from .resources.agents import AgentsResource return AgentsResource(self) + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + @cached_property def model_providers(self) -> ModelProvidersResource: from .resources.model_providers import ModelProvidersResource @@ -359,18 +359,18 @@ def __init__( 
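# A minimal sketch (hypothetical names, not the generated code) of the lazy
# accessor pattern these hunks reorder: each resource is imported and built on
# first access, and functools.cached_property memoizes it, so swapping the
# order of `chat` and `agents` in the class body changes only listing order,
# never behavior.
from functools import cached_property


class _ChatResource:
    def __init__(self, client: "ExampleClient") -> None:
        self._client = client


class ExampleClient:
    @cached_property
    def chat(self) -> "_ChatResource":
        # The real client also defers the resource import to this point,
        # keeping client construction cheap.
        return _ChatResource(self)


client = ExampleClient()
assert client.chat is client.chat  # cached after the first access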
self._default_stream_cls = AsyncStream - @cached_property - def chat(self) -> AsyncChatResource: - from .resources.chat import AsyncChatResource - - return AsyncChatResource(self) - @cached_property def agents(self) -> AsyncAgentsResource: from .resources.agents import AsyncAgentsResource return AsyncAgentsResource(self) + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + @cached_property def model_providers(self) -> AsyncModelProvidersResource: from .resources.model_providers import AsyncModelProvidersResource @@ -539,18 +539,18 @@ class GradientAIWithRawResponse: def __init__(self, client: GradientAI) -> None: self._client = client - @cached_property - def chat(self) -> chat.ChatResourceWithRawResponse: - from .resources.chat import ChatResourceWithRawResponse - - return ChatResourceWithRawResponse(self._client.chat) - @cached_property def agents(self) -> agents.AgentsResourceWithRawResponse: from .resources.agents import AgentsResourceWithRawResponse return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + @cached_property def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse: from .resources.model_providers import ModelProvidersResourceWithRawResponse @@ -588,18 +588,18 @@ class AsyncGradientAIWithRawResponse: def __init__(self, client: AsyncGradientAI) -> None: self._client = client - @cached_property - def chat(self) -> chat.AsyncChatResourceWithRawResponse: - from .resources.chat import AsyncChatResourceWithRawResponse - - return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: from .resources.agents import AsyncAgentsResourceWithRawResponse return AsyncAgentsResourceWithRawResponse(self._client.agents) + @cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse: from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse @@ -637,18 +637,18 @@ class GradientAIWithStreamedResponse: def __init__(self, client: GradientAI) -> None: self._client = client - @cached_property - def chat(self) -> chat.ChatResourceWithStreamingResponse: - from .resources.chat import ChatResourceWithStreamingResponse - - return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property def agents(self) -> agents.AgentsResourceWithStreamingResponse: from .resources.agents import AgentsResourceWithStreamingResponse return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse: from .resources.model_providers import ModelProvidersResourceWithStreamingResponse @@ -686,18 +686,18 @@ class AsyncGradientAIWithStreamedResponse: def __init__(self, client: AsyncGradientAI) -> None: self._client = client - @cached_property - def chat(self) 
-> chat.AsyncChatResourceWithStreamingResponse: - from .resources.chat import AsyncChatResourceWithStreamingResponse - - return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: from .resources.agents import AsyncAgentsResourceWithStreamingResponse return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse: from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 33bfffff..b56e7e4c 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -58,18 +58,18 @@ ) __all__ = [ - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", "ModelProvidersResource", "AsyncModelProvidersResource", "ModelProvidersResourceWithRawResponse", diff --git a/tests/test_client.py b/tests/test_client.py index 5fe40b1e..16220895 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -822,7 +822,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.chat.completions.with_streaming_response.create( + client.agents.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -840,7 +840,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.chat.completions.with_streaming_response.create( + client.agents.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -877,7 +877,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.chat.completions.with_raw_response.create( + response = client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -909,7 +909,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.chat.completions.with_raw_response.create( + response = client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -941,7 +941,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.chat.completions.with_raw_response.create( + response = 
client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -1775,7 +1775,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.chat.completions.with_streaming_response.create( + await async_client.agents.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -1795,7 +1795,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.chat.completions.with_streaming_response.create( + await async_client.agents.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -1833,7 +1833,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.chat.completions.with_raw_response.create( + response = await client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -1866,7 +1866,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.chat.completions.with_raw_response.create( + response = await client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -1899,7 +1899,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.chat.completions.with_raw_response.create( + response = await client.agents.chat.completions.with_raw_response.create( messages=[ { "content": "string", From fc3be1fb3b9d0ac075ec6dac6d9c1775842f9526 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 05:15:19 +0000 Subject: [PATCH 095/200] chore(internal): codegen related update --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 46b9b6b2..3b005e52 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.9" + ".": "0.1.0-alpha.10" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 47f201c1..807b47af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.9" +version = "0.1.0-alpha.10" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index 5cd8ca49..f1fdf3c0 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
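# A self-contained sketch (an assumption, not release-please's actual
# implementation) of why the "# x-release-please-version" marker appears in
# the hunk below: tooling can bump the version with a plain text substitution
# instead of parsing Python.
import re


def bump(source: str, new_version: str) -> str:
    # Rewrite only the line that carries the marker comment, preserving its
    # original spacing.
    return re.sub(
        r'__version__ = ".*?"(\s*)# x-release-please-version',
        lambda m: f'__version__ = "{new_version}"{m.group(1)}# x-release-please-version',
        source,
    )


line = '__version__ = "0.1.0-alpha.9"  # x-release-please-version'
print(bump(line, "0.1.0-alpha.10"))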
__title__ = "gradientai" -__version__ = "0.1.0-alpha.9" # x-release-please-version +__version__ = "0.1.0-alpha.10" # x-release-please-version From 4214e34493b16a23865877682ed116a8eadc616c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 08:58:05 +0000 Subject: [PATCH 096/200] chore(ci): only run for pushes and fork pull requests --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6bfd00b1..aef77646 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,6 +17,7 @@ jobs: timeout-minutes: 10 name: lint runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -42,6 +43,7 @@ jobs: contents: read id-token: write runs-on: depot-ubuntu-24.04 + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -62,6 +64,7 @@ jobs: timeout-minutes: 10 name: test runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 From 5656c6dc9b29b8b1d20f3f379d4df88053f77571 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 16:55:42 +0000 Subject: [PATCH 097/200] feat(api): manual updates chore: Move model providers --- .github/workflows/ci.yml | 3 - .stats.yml | 2 +- api.md | 106 ++++--- src/gradientai/_client.py | 41 +-- src/gradientai/resources/__init__.py | 14 - .../model_providers/anthropic/__init__.py | 33 --- .../model_providers/anthropic/anthropic.py | 102 ------- .../model_providers/openai/__init__.py | 33 --- .../model_providers/openai/openai.py | 102 ------- src/gradientai/resources/models/__init__.py | 33 +++ .../resources/{ => models}/models.py | 48 +++- .../providers}/__init__.py | 26 +- .../keys.py => models/providers/anthropic.py} | 180 ++++++------ .../keys.py => models/providers/openai.py} | 180 ++++++------ .../providers/providers.py} | 70 ++--- .../model_providers/anthropic/__init__.py | 14 - .../types/model_providers/openai/__init__.py | 14 - .../{model_providers => models}/__init__.py | 0 .../types/models/providers/__init__.py | 24 ++ .../providers/anthropic_create_params.py} | 4 +- .../providers/anthropic_create_response.py} | 4 +- .../providers/anthropic_delete_response.py} | 4 +- .../anthropic_list_agents_params.py} | 4 +- .../anthropic_list_agents_response.py} | 4 +- .../providers/anthropic_list_params.py} | 4 +- .../providers/anthropic_list_response.py} | 4 +- .../providers/anthropic_retrieve_response.py} | 4 +- .../providers/anthropic_update_params.py} | 4 +- .../providers/anthropic_update_response.py} | 4 +- .../providers/openai_create_params.py} | 4 +- .../providers/openai_create_response.py} | 4 +- .../providers/openai_delete_response.py} | 4 +- .../providers/openai_list_params.py} | 4 +- .../providers/openai_list_response.py} | 4 +- .../openai_retrieve_agents_params.py} | 4 +- .../openai_retrieve_agents_response.py} | 4 +- .../providers/openai_retrieve_response.py} | 4 +- .../providers/openai_update_params.py} | 4 +- .../providers/openai_update_response.py} | 4 +- .../model_providers/openai/__init__.py | 1 - .../{model_providers 
=> models}/__init__.py | 0 .../providers}/__init__.py | 0 .../providers/test_anthropic.py} | 258 +++++++++--------- .../providers/test_openai.py} | 258 +++++++++--------- 44 files changed, 677 insertions(+), 945 deletions(-) delete mode 100644 src/gradientai/resources/model_providers/anthropic/__init__.py delete mode 100644 src/gradientai/resources/model_providers/anthropic/anthropic.py delete mode 100644 src/gradientai/resources/model_providers/openai/__init__.py delete mode 100644 src/gradientai/resources/model_providers/openai/openai.py create mode 100644 src/gradientai/resources/models/__init__.py rename src/gradientai/resources/{ => models}/models.py (85%) rename src/gradientai/resources/{model_providers => models/providers}/__init__.py (65%) rename src/gradientai/resources/{model_providers/anthropic/keys.py => models/providers/anthropic.py} (84%) rename src/gradientai/resources/{model_providers/openai/keys.py => models/providers/openai.py} (85%) rename src/gradientai/resources/{model_providers/model_providers.py => models/providers/providers.py} (58%) delete mode 100644 src/gradientai/types/model_providers/anthropic/__init__.py delete mode 100644 src/gradientai/types/model_providers/openai/__init__.py rename src/gradientai/types/{model_providers => models}/__init__.py (100%) create mode 100644 src/gradientai/types/models/providers/__init__.py rename src/gradientai/types/{model_providers/openai/key_create_params.py => models/providers/anthropic_create_params.py} (68%) rename src/gradientai/types/{model_providers/anthropic/key_update_response.py => models/providers/anthropic_create_response.py} (77%) rename src/gradientai/types/{model_providers/anthropic/key_create_response.py => models/providers/anthropic_delete_response.py} (77%) rename src/gradientai/types/{model_providers/openai/key_retrieve_agents_params.py => models/providers/anthropic_list_agents_params.py} (71%) rename src/gradientai/types/{model_providers/anthropic/key_list_agents_response.py => models/providers/anthropic_list_agents_response.py} (83%) rename src/gradientai/types/{model_providers/anthropic/key_list_params.py => models/providers/anthropic_list_params.py} (74%) rename src/gradientai/types/{model_providers/anthropic/key_list_response.py => models/providers/anthropic_list_response.py} (85%) rename src/gradientai/types/{model_providers/anthropic/key_retrieve_response.py => models/providers/anthropic_retrieve_response.py} (76%) rename src/gradientai/types/{model_providers/openai/key_update_params.py => models/providers/anthropic_update_params.py} (78%) rename src/gradientai/types/{model_providers/anthropic/key_delete_response.py => models/providers/anthropic_update_response.py} (77%) rename src/gradientai/types/{model_providers/anthropic/key_create_params.py => models/providers/openai_create_params.py} (70%) rename src/gradientai/types/{model_providers/openai/key_create_response.py => models/providers/openai_create_response.py} (78%) rename src/gradientai/types/{model_providers/openai/key_delete_response.py => models/providers/openai_delete_response.py} (78%) rename src/gradientai/types/{model_providers/openai/key_list_params.py => models/providers/openai_list_params.py} (75%) rename src/gradientai/types/{model_providers/openai/key_list_response.py => models/providers/openai_list_response.py} (86%) rename src/gradientai/types/{model_providers/anthropic/key_list_agents_params.py => models/providers/openai_retrieve_agents_params.py} (71%) rename 
src/gradientai/types/{model_providers/openai/key_retrieve_agents_response.py => models/providers/openai_retrieve_agents_response.py} (82%) rename src/gradientai/types/{model_providers/openai/key_update_response.py => models/providers/openai_retrieve_response.py} (77%) rename src/gradientai/types/{model_providers/anthropic/key_update_params.py => models/providers/openai_update_params.py} (79%) rename src/gradientai/types/{model_providers/openai/key_retrieve_response.py => models/providers/openai_update_response.py} (78%) delete mode 100644 tests/api_resources/model_providers/openai/__init__.py rename tests/api_resources/{model_providers => models}/__init__.py (100%) rename tests/api_resources/{model_providers/anthropic => models/providers}/__init__.py (100%) rename tests/api_resources/{model_providers/anthropic/test_keys.py => models/providers/test_anthropic.py} (59%) rename tests/api_resources/{model_providers/openai/test_keys.py => models/providers/test_openai.py} (62%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aef77646..6bfd00b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,6 @@ jobs: timeout-minutes: 10 name: lint runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} - if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -43,7 +42,6 @@ jobs: contents: read id-token: write runs-on: depot-ubuntu-24.04 - if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -64,7 +62,6 @@ jobs: timeout-minutes: 10 name: test runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} - if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 diff --git a/.stats.yml b/.stats.yml index ce83998c..0a6b7a71 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 9b44ce3fd39c43f2001bc11934e6b1b0 +config_hash: 1c936b3bd798c3fcb25479b19efa999a diff --git a/api.md b/api.md index fc8d20eb..65699eaa 100644 --- a/api.md +++ b/api.md @@ -267,60 +267,6 @@ Methods: - client.chat.completions.create(\*\*params) -> CompletionCreateResponse -# ModelProviders - -## Anthropic - -### Keys - -Types: - -```python -from gradientai.types.model_providers.anthropic import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyListAgentsResponse, -) -``` - -Methods: - -- client.model_providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.model_providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.model_providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.model_providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.model_providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.model_providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse - -## OpenAI - -### Keys - -Types: - -```python -from gradientai.types.model_providers.openai import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - 
KeyDeleteResponse, - KeyRetrieveAgentsResponse, -) -``` - -Methods: - -- client.model_providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- client.model_providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.model_providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.model_providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.model_providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.model_providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse - # Regions Types: @@ -438,4 +384,54 @@ from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListR Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse + +## Providers + +### Anthropic + +Types: + +```python +from gradientai.types.models.providers import ( + AnthropicCreateResponse, + AnthropicRetrieveResponse, + AnthropicUpdateResponse, + AnthropicListResponse, + AnthropicDeleteResponse, + AnthropicListAgentsResponse, +) +``` + +Methods: + +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse + +### OpenAI + +Types: + +```python +from gradientai.types.models.providers import ( + OpenAICreateResponse, + OpenAIRetrieveResponse, + OpenAIUpdateResponse, + OpenAIListResponse, + OpenAIDeleteResponse, + OpenAIRetrieveAgentsResponse, +) +``` + +Methods: + +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index c9fe6733..2dc19e49 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -32,14 +32,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, knowledge_bases, model_providers - from .resources.models import ModelsResource, AsyncModelsResource + from .resources import chat, agents, models, regions, inference, knowledge_bases from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource - from .resources.model_providers.model_providers import ModelProvidersResource, AsyncModelProvidersResource __all__ = [ "Timeout", @@ -131,12 +130,6 @@ def chat(self) -> ChatResource: return 
ChatResource(self) - @cached_property - def model_providers(self) -> ModelProvidersResource: - from .resources.model_providers import ModelProvidersResource - - return ModelProvidersResource(self) - @cached_property def regions(self) -> RegionsResource: from .resources.regions import RegionsResource @@ -371,12 +364,6 @@ def chat(self) -> AsyncChatResource: return AsyncChatResource(self) - @cached_property - def model_providers(self) -> AsyncModelProvidersResource: - from .resources.model_providers import AsyncModelProvidersResource - - return AsyncModelProvidersResource(self) - @cached_property def regions(self) -> AsyncRegionsResource: from .resources.regions import AsyncRegionsResource @@ -551,12 +538,6 @@ def chat(self) -> chat.ChatResourceWithRawResponse: return ChatResourceWithRawResponse(self._client.chat) - @cached_property - def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse: - from .resources.model_providers import ModelProvidersResourceWithRawResponse - - return ModelProvidersResourceWithRawResponse(self._client.model_providers) - @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: from .resources.regions import RegionsResourceWithRawResponse @@ -600,12 +581,6 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse: return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property - def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse: - from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse - - return AsyncModelProvidersResourceWithRawResponse(self._client.model_providers) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: from .resources.regions import AsyncRegionsResourceWithRawResponse @@ -649,12 +624,6 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse: return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse: - from .resources.model_providers import ModelProvidersResourceWithStreamingResponse - - return ModelProvidersResourceWithStreamingResponse(self._client.model_providers) - @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: from .resources.regions import RegionsResourceWithStreamingResponse @@ -698,12 +667,6 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse: - from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse - - return AsyncModelProvidersResourceWithStreamingResponse(self._client.model_providers) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: from .resources.regions import AsyncRegionsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index b56e7e4c..e1ed4a00 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -48,14 +48,6 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) -from .model_providers import ( - ModelProvidersResource, - AsyncModelProvidersResource, - ModelProvidersResourceWithRawResponse, - AsyncModelProvidersResourceWithRawResponse, - ModelProvidersResourceWithStreamingResponse, - 
AsyncModelProvidersResourceWithStreamingResponse, -) __all__ = [ "AgentsResource", @@ -70,12 +62,6 @@ "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", - "ModelProvidersResource", - "AsyncModelProvidersResource", - "ModelProvidersResourceWithRawResponse", - "AsyncModelProvidersResourceWithRawResponse", - "ModelProvidersResourceWithStreamingResponse", - "AsyncModelProvidersResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/gradientai/resources/model_providers/anthropic/__init__.py b/src/gradientai/resources/model_providers/anthropic/__init__.py deleted file mode 100644 index 057a3a2f..00000000 --- a/src/gradientai/resources/model_providers/anthropic/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/model_providers/anthropic/anthropic.py b/src/gradientai/resources/model_providers/anthropic/anthropic.py deleted file mode 100644 index 23a914e9..00000000 --- a/src/gradientai/resources/model_providers/anthropic/anthropic.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["AnthropicResource", "AsyncAnthropicResource"] - - -class AnthropicResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AnthropicResourceWithStreamingResponse(self) - - -class AsyncAnthropicResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncAnthropicResourceWithStreamingResponse(self) - - -class AnthropicResourceWithRawResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithRawResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._anthropic.keys) - - -class AnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/gradientai/resources/model_providers/openai/__init__.py b/src/gradientai/resources/model_providers/openai/__init__.py deleted file mode 100644 index 66d8ca7a..00000000 --- a/src/gradientai/resources/model_providers/openai/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
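# The modules deleted above were thin namespace shims: AnthropicResource held
# no endpoints of its own and only exposed `keys`. A compact sketch of that
# delegation pattern (simplified, with hypothetical stand-in types):
from functools import cached_property


class KeysResource:
    def list(self) -> list:
        return []  # placeholder; the real resource issues an HTTP request


class AnthropicResource:
    @cached_property
    def keys(self) -> KeysResource:
        return KeysResource()


# After this patch the extra hop is gone: what was
# client.model_providers.anthropic.keys.list() becomes
# client.models.providers.anthropic.list().
nested = AnthropicResource()
print(nested.keys.list())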
- -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/model_providers/openai/openai.py b/src/gradientai/resources/model_providers/openai/openai.py deleted file mode 100644 index b02dc2e1..00000000 --- a/src/gradientai/resources/model_providers/openai/openai.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["OpenAIResource", "AsyncOpenAIResource"] - - -class OpenAIResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> OpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return OpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return OpenAIResourceWithStreamingResponse(self) - - -class AsyncOpenAIResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncOpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncOpenAIResourceWithStreamingResponse(self) - - -class OpenAIResourceWithRawResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithRawResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._openai.keys) - - -class OpenAIResourceWithStreamingResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithStreamingResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/gradientai/resources/models/__init__.py b/src/gradientai/resources/models/__init__.py new file mode 100644 index 00000000..e30dd201 --- /dev/null +++ b/src/gradientai/resources/models/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) + +__all__ = [ + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models/models.py similarity index 85% rename from src/gradientai/resources/models.py rename to src/gradientai/resources/models/models.py index c8e78b9b..3c524767 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models/models.py @@ -7,24 +7,36 @@ import httpx -from ..types import model_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ...types import model_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client 
import make_request_options -from ..types.model_list_response import ModelListResponse +from ..._base_client import make_request_options +from .providers.providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) +from ...types.model_list_response import ModelListResponse __all__ = ["ModelsResource", "AsyncModelsResource"] class ModelsResource(SyncAPIResource): + @cached_property + def providers(self) -> ProvidersResource: + return ProvidersResource(self._client) + @cached_property def with_raw_response(self) -> ModelsResourceWithRawResponse: """ @@ -122,6 +134,10 @@ def list( class AsyncModelsResource(AsyncAPIResource): + @cached_property + def providers(self) -> AsyncProvidersResource: + return AsyncProvidersResource(self._client) + @cached_property def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: """ @@ -226,6 +242,10 @@ def __init__(self, models: ModelsResource) -> None: models.list, ) + @cached_property + def providers(self) -> ProvidersResourceWithRawResponse: + return ProvidersResourceWithRawResponse(self._models.providers) + class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: @@ -235,6 +255,10 @@ def __init__(self, models: AsyncModelsResource) -> None: models.list, ) + @cached_property + def providers(self) -> AsyncProvidersResourceWithRawResponse: + return AsyncProvidersResourceWithRawResponse(self._models.providers) + class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: @@ -244,6 +268,10 @@ def __init__(self, models: ModelsResource) -> None: models.list, ) + @cached_property + def providers(self) -> ProvidersResourceWithStreamingResponse: + return ProvidersResourceWithStreamingResponse(self._models.providers) + class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: @@ -252,3 +280,7 @@ def __init__(self, models: AsyncModelsResource) -> None: self.list = async_to_streamed_response_wrapper( models.list, ) + + @cached_property + def providers(self) -> AsyncProvidersResourceWithStreamingResponse: + return AsyncProvidersResourceWithStreamingResponse(self._models.providers) diff --git a/src/gradientai/resources/model_providers/__init__.py b/src/gradientai/resources/models/providers/__init__.py similarity index 65% rename from src/gradientai/resources/model_providers/__init__.py rename to src/gradientai/resources/models/providers/__init__.py index 3d91a86c..1731e057 100644 --- a/src/gradientai/resources/model_providers/__init__.py +++ b/src/gradientai/resources/models/providers/__init__.py @@ -16,13 +16,13 @@ AnthropicResourceWithStreamingResponse, AsyncAnthropicResourceWithStreamingResponse, ) -from .model_providers import ( - ModelProvidersResource, - AsyncModelProvidersResource, - ModelProvidersResourceWithRawResponse, - AsyncModelProvidersResourceWithRawResponse, - ModelProvidersResourceWithStreamingResponse, - AsyncModelProvidersResourceWithStreamingResponse, +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, ) __all__ = [ @@ -38,10 +38,10 @@ "AsyncOpenAIResourceWithRawResponse", "OpenAIResourceWithStreamingResponse", "AsyncOpenAIResourceWithStreamingResponse", - 
"ModelProvidersResource", - "AsyncModelProvidersResource", - "ModelProvidersResourceWithRawResponse", - "AsyncModelProvidersResourceWithRawResponse", - "ModelProvidersResourceWithStreamingResponse", - "AsyncModelProvidersResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/model_providers/anthropic/keys.py b/src/gradientai/resources/models/providers/anthropic.py similarity index 84% rename from src/gradientai/resources/model_providers/anthropic/keys.py rename to src/gradientai/resources/models/providers/anthropic.py index 4d884655..26c9b977 100644 --- a/src/gradientai/resources/model_providers/anthropic/keys.py +++ b/src/gradientai/resources/models/providers/anthropic.py @@ -15,41 +15,41 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options -from ....types.model_providers.anthropic import ( - key_list_params, - key_create_params, - key_update_params, - key_list_agents_params, +from ....types.models.providers import ( + anthropic_list_params, + anthropic_create_params, + anthropic_update_params, + anthropic_list_agents_params, ) -from ....types.model_providers.anthropic.key_list_response import KeyListResponse -from ....types.model_providers.anthropic.key_create_response import KeyCreateResponse -from ....types.model_providers.anthropic.key_delete_response import KeyDeleteResponse -from ....types.model_providers.anthropic.key_update_response import KeyUpdateResponse -from ....types.model_providers.anthropic.key_retrieve_response import KeyRetrieveResponse -from ....types.model_providers.anthropic.key_list_agents_response import KeyListAgentsResponse +from ....types.models.providers.anthropic_list_response import AnthropicListResponse +from ....types.models.providers.anthropic_create_response import AnthropicCreateResponse +from ....types.models.providers.anthropic_delete_response import AnthropicDeleteResponse +from ....types.models.providers.anthropic_update_response import AnthropicUpdateResponse +from ....types.models.providers.anthropic_retrieve_response import AnthropicRetrieveResponse +from ....types.models.providers.anthropic_list_agents_response import AnthropicListAgentsResponse -__all__ = ["KeysResource", "AsyncKeysResource"] +__all__ = ["AnthropicResource", "AsyncAnthropicResource"] -class KeysResource(SyncAPIResource): +class AnthropicResource(SyncAPIResource): @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: + def with_raw_response(self) -> AnthropicResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return KeysResourceWithRawResponse(self) + return AnthropicResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return KeysResourceWithStreamingResponse(self) + return AnthropicResourceWithStreamingResponse(self) def create( self, @@ -62,7 +62,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: + ) -> AnthropicCreateResponse: """ To create an Anthropic API key, send a POST request to `/v2/gen-ai/anthropic/keys`. @@ -85,12 +85,12 @@ def create( "api_key": api_key, "name": name, }, - key_create_params.KeyCreateParams, + anthropic_create_params.AnthropicCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyCreateResponse, + cast_to=AnthropicCreateResponse, ) def retrieve( @@ -103,7 +103,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: + ) -> AnthropicRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. @@ -126,7 +126,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyRetrieveResponse, + cast_to=AnthropicRetrieveResponse, ) def update( @@ -142,7 +142,7 @@ def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: + ) -> AnthropicUpdateResponse: """ To update an Anthropic API key, send a PUT request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. @@ -168,12 +168,12 @@ def update( "body_api_key_uuid": body_api_key_uuid, "name": name, }, - key_update_params.KeyUpdateParams, + anthropic_update_params.AnthropicUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyUpdateResponse, + cast_to=AnthropicUpdateResponse, ) def list( @@ -187,7 +187,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: + ) -> AnthropicListResponse: """ To list all Anthropic API keys, send a GET request to `/v2/gen-ai/anthropic/keys`. @@ -219,10 +219,10 @@ def list( "page": page, "per_page": per_page, }, - key_list_params.KeyListParams, + anthropic_list_params.AnthropicListParams, ), ), - cast_to=KeyListResponse, + cast_to=AnthropicListResponse, ) def delete( @@ -235,7 +235,7 @@ def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: + ) -> AnthropicDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. @@ -258,7 +258,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyDeleteResponse, + cast_to=AnthropicDeleteResponse, ) def list_agents( @@ -273,7 +273,7 @@ def list_agents( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: + ) -> AnthropicListAgentsResponse: """ List Agents by Anthropic Key. 
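# Hedged usage sketch of the renamed surface (method and parameter names are
# taken from api.md in this patch; the endpoint still needs valid credentials,
# so this is illustrative rather than a working script):
from gradientai import GradientAI

client = GradientAI()  # reads GRADIENTAI_API_KEY from the environment by default

# Previously: client.model_providers.anthropic.keys.list(page=1, per_page=10)
keys = client.models.providers.anthropic.list(page=1, per_page=10)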
@@ -306,32 +306,32 @@ def list_agents( "page": page, "per_page": per_page, }, - key_list_agents_params.KeyListAgentsParams, + anthropic_list_agents_params.AnthropicListAgentsParams, ), ), - cast_to=KeyListAgentsResponse, + cast_to=AnthropicListAgentsResponse, ) -class AsyncKeysResource(AsyncAPIResource): +class AsyncAnthropicResource(AsyncAPIResource): @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return AsyncKeysResourceWithRawResponse(self) + return AsyncAnthropicResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return AsyncKeysResourceWithStreamingResponse(self) + return AsyncAnthropicResourceWithStreamingResponse(self) async def create( self, @@ -344,7 +344,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: + ) -> AnthropicCreateResponse: """ To create an Anthropic API key, send a POST request to `/v2/gen-ai/anthropic/keys`. @@ -367,12 +367,12 @@ async def create( "api_key": api_key, "name": name, }, - key_create_params.KeyCreateParams, + anthropic_create_params.AnthropicCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyCreateResponse, + cast_to=AnthropicCreateResponse, ) async def retrieve( @@ -385,7 +385,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: + ) -> AnthropicRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. @@ -408,7 +408,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyRetrieveResponse, + cast_to=AnthropicRetrieveResponse, ) async def update( @@ -424,7 +424,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: + ) -> AnthropicUpdateResponse: """ To update an Anthropic API key, send a PUT request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
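# A simplified model (not the SDK's real helpers) of the request-body pattern
# visible in these hunks: params are declared as TypedDicts such as
# AnthropicCreateParams, and maybe_transform shapes them into the JSON body.
from typing import TypedDict


class AnthropicCreateParams(TypedDict, total=False):
    api_key: str
    name: str


def to_body(params: AnthropicCreateParams) -> dict:
    # Stand-in for maybe_transform: keep only the keys the caller supplied.
    return dict(params)


print(to_body({"api_key": "example-key", "name": "example-name"}))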
@@ -450,12 +450,12 @@ async def update( "body_api_key_uuid": body_api_key_uuid, "name": name, }, - key_update_params.KeyUpdateParams, + anthropic_update_params.AnthropicUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyUpdateResponse, + cast_to=AnthropicUpdateResponse, ) async def list( @@ -469,7 +469,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: + ) -> AnthropicListResponse: """ To list all Anthropic API keys, send a GET request to `/v2/gen-ai/anthropic/keys`. @@ -501,10 +501,10 @@ async def list( "page": page, "per_page": per_page, }, - key_list_params.KeyListParams, + anthropic_list_params.AnthropicListParams, ), ), - cast_to=KeyListResponse, + cast_to=AnthropicListResponse, ) async def delete( @@ -517,7 +517,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: + ) -> AnthropicDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. @@ -540,7 +540,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyDeleteResponse, + cast_to=AnthropicDeleteResponse, ) async def list_agents( @@ -555,7 +555,7 @@ async def list_agents( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: + ) -> AnthropicListAgentsResponse: """ List Agents by Anthropic Key. 
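The async resource mirrors the sync one method for method; only the awaiting differs. A sketch under the same assumptions, exercising the paginated `list` and the per-key `list_agents` from the hunks above:

```python
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()  # credentials assumed to come from the environment

    # Paginated key listing; `api_key_infos` is Optional on the response model.
    keys = await client.models.providers.anthropic.list(page=1, per_page=10)
    for info in keys.api_key_infos or []:
        print(info)

    # Agents attached to a specific Anthropic key, addressed by key UUID.
    agents = await client.models.providers.anthropic.list_agents(
        uuid="api_key_uuid",
        per_page=5,
    )
    print(agents.agents)


asyncio.run(main())
```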
@@ -588,104 +588,104 @@ async def list_agents( "page": page, "per_page": per_page, }, - key_list_agents_params.KeyListAgentsParams, + anthropic_list_agents_params.AnthropicListAgentsParams, ), ), - cast_to=KeyListAgentsResponse, + cast_to=AnthropicListAgentsResponse, ) -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys +class AnthropicResourceWithRawResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic self.create = to_raw_response_wrapper( - keys.create, + anthropic.create, ) self.retrieve = to_raw_response_wrapper( - keys.retrieve, + anthropic.retrieve, ) self.update = to_raw_response_wrapper( - keys.update, + anthropic.update, ) self.list = to_raw_response_wrapper( - keys.list, + anthropic.list, ) self.delete = to_raw_response_wrapper( - keys.delete, + anthropic.delete, ) self.list_agents = to_raw_response_wrapper( - keys.list_agents, + anthropic.list_agents, ) -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys +class AsyncAnthropicResourceWithRawResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic self.create = async_to_raw_response_wrapper( - keys.create, + anthropic.create, ) self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, + anthropic.retrieve, ) self.update = async_to_raw_response_wrapper( - keys.update, + anthropic.update, ) self.list = async_to_raw_response_wrapper( - keys.list, + anthropic.list, ) self.delete = async_to_raw_response_wrapper( - keys.delete, + anthropic.delete, ) self.list_agents = async_to_raw_response_wrapper( - keys.list_agents, + anthropic.list_agents, ) -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys +class AnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic self.create = to_streamed_response_wrapper( - keys.create, + anthropic.create, ) self.retrieve = to_streamed_response_wrapper( - keys.retrieve, + anthropic.retrieve, ) self.update = to_streamed_response_wrapper( - keys.update, + anthropic.update, ) self.list = to_streamed_response_wrapper( - keys.list, + anthropic.list, ) self.delete = to_streamed_response_wrapper( - keys.delete, + anthropic.delete, ) self.list_agents = to_streamed_response_wrapper( - keys.list_agents, + anthropic.list_agents, ) -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys +class AsyncAnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic self.create = async_to_streamed_response_wrapper( - keys.create, + anthropic.create, ) self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, + anthropic.retrieve, ) self.update = async_to_streamed_response_wrapper( - keys.update, + anthropic.update, ) self.list = async_to_streamed_response_wrapper( - keys.list, + anthropic.list, ) self.delete = async_to_streamed_response_wrapper( - keys.delete, + anthropic.delete, ) self.list_agents = async_to_streamed_response_wrapper( - keys.list_agents, + anthropic.list_agents, ) diff --git a/src/gradientai/resources/model_providers/openai/keys.py b/src/gradientai/resources/models/providers/openai.py similarity index 85% rename from src/gradientai/resources/model_providers/openai/keys.py rename to 
src/gradientai/resources/models/providers/openai.py index fb974808..d337cd9b 100644 --- a/src/gradientai/resources/model_providers/openai/keys.py +++ b/src/gradientai/resources/models/providers/openai.py @@ -15,41 +15,41 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options -from ....types.model_providers.openai import ( - key_list_params, - key_create_params, - key_update_params, - key_retrieve_agents_params, +from ....types.models.providers import ( + openai_list_params, + openai_create_params, + openai_update_params, + openai_retrieve_agents_params, ) -from ....types.model_providers.openai.key_list_response import KeyListResponse -from ....types.model_providers.openai.key_create_response import KeyCreateResponse -from ....types.model_providers.openai.key_delete_response import KeyDeleteResponse -from ....types.model_providers.openai.key_update_response import KeyUpdateResponse -from ....types.model_providers.openai.key_retrieve_response import KeyRetrieveResponse -from ....types.model_providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse +from ....types.models.providers.openai_list_response import OpenAIListResponse +from ....types.models.providers.openai_create_response import OpenAICreateResponse +from ....types.models.providers.openai_delete_response import OpenAIDeleteResponse +from ....types.models.providers.openai_update_response import OpenAIUpdateResponse +from ....types.models.providers.openai_retrieve_response import OpenAIRetrieveResponse +from ....types.models.providers.openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse -__all__ = ["KeysResource", "AsyncKeysResource"] +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] -class KeysResource(SyncAPIResource): +class OpenAIResource(SyncAPIResource): @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: + def with_raw_response(self) -> OpenAIResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return KeysResourceWithRawResponse(self) + return OpenAIResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return KeysResourceWithStreamingResponse(self) + return OpenAIResourceWithStreamingResponse(self) def create( self, @@ -62,7 +62,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: + ) -> OpenAICreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. 
@@ -84,12 +84,12 @@ def create( "api_key": api_key, "name": name, }, - key_create_params.KeyCreateParams, + openai_create_params.OpenAICreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyCreateResponse, + cast_to=OpenAICreateResponse, ) def retrieve( @@ -102,7 +102,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: + ) -> OpenAIRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. @@ -125,7 +125,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyRetrieveResponse, + cast_to=OpenAIRetrieveResponse, ) def update( @@ -141,7 +141,7 @@ def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: + ) -> OpenAIUpdateResponse: """ To update an OpenAI API key, send a PUT request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. @@ -167,12 +167,12 @@ def update( "body_api_key_uuid": body_api_key_uuid, "name": name, }, - key_update_params.KeyUpdateParams, + openai_update_params.OpenAIUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyUpdateResponse, + cast_to=OpenAIUpdateResponse, ) def list( @@ -186,7 +186,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: + ) -> OpenAIListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. @@ -217,10 +217,10 @@ def list( "page": page, "per_page": per_page, }, - key_list_params.KeyListParams, + openai_list_params.OpenAIListParams, ), ), - cast_to=KeyListResponse, + cast_to=OpenAIListResponse, ) def delete( @@ -233,7 +233,7 @@ def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: + ) -> OpenAIDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. @@ -256,7 +256,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyDeleteResponse, + cast_to=OpenAIDeleteResponse, ) def retrieve_agents( @@ -271,7 +271,7 @@ def retrieve_agents( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: + ) -> OpenAIRetrieveAgentsResponse: """ List Agents by OpenAI Key. 
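The OpenAI resource gets the same treatment, with one naming asymmetry worth noting: agents attached to an OpenAI key are fetched with `retrieve_agents`, while the Anthropic resource calls its counterpart `list_agents`. A usage sketch under the same credential assumptions; the `uuid` keyword follows the pattern the Anthropic tests later in this patch use:

```python
from gradientai import GradientAI

client = GradientAI()  # credential handling assumed, as above

key = client.models.providers.openai.create(
    api_key="sk-example",  # placeholder value
    name="openai-example-key",
)
print(key.api_key_info)

# Note `retrieve_agents`, not `list_agents`, on the OpenAI side.
agents = client.models.providers.openai.retrieve_agents(uuid="api_key_uuid")
print(agents.agents)
```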
@@ -304,32 +304,32 @@ def retrieve_agents( "page": page, "per_page": per_page, }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, + openai_retrieve_agents_params.OpenAIRetrieveAgentsParams, ), ), - cast_to=KeyRetrieveAgentsResponse, + cast_to=OpenAIRetrieveAgentsResponse, ) -class AsyncKeysResource(AsyncAPIResource): +class AsyncOpenAIResource(AsyncAPIResource): @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return AsyncKeysResourceWithRawResponse(self) + return AsyncOpenAIResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return AsyncKeysResourceWithStreamingResponse(self) + return AsyncOpenAIResourceWithStreamingResponse(self) async def create( self, @@ -342,7 +342,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: + ) -> OpenAICreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. @@ -364,12 +364,12 @@ async def create( "api_key": api_key, "name": name, }, - key_create_params.KeyCreateParams, + openai_create_params.OpenAICreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyCreateResponse, + cast_to=OpenAICreateResponse, ) async def retrieve( @@ -382,7 +382,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: + ) -> OpenAIRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. @@ -405,7 +405,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyRetrieveResponse, + cast_to=OpenAIRetrieveResponse, ) async def update( @@ -421,7 +421,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: + ) -> OpenAIUpdateResponse: """ To update an OpenAI API key, send a PUT request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
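The `update` methods take the key UUID twice: once as the URL path parameter and, optionally, again in the request body. The generated params disambiguate with `path_`/`body_` prefixes, and the `PropertyInfo(alias="api_key_uuid")` annotation visible in the `*_update_params` diffs below serializes `body_api_key_uuid` back to its wire name. A sketch of a call that sets both, patterned on the Anthropic tests later in this patch (the `path_api_key_uuid` keyword on the OpenAI side is inferred from that symmetry):

```python
from gradientai import GradientAI

client = GradientAI()

updated = client.models.providers.openai.update(
    path_api_key_uuid="api_key_uuid",  # fills /v2/gen-ai/openai/keys/{api_key_uuid}
    body_api_key_uuid="api_key_uuid",  # serialized as "api_key_uuid" in the JSON body
    api_key="sk-example",              # placeholder value
    name="renamed-key",
)
print(updated.api_key_info)
```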
@@ -447,12 +447,12 @@ async def update( "body_api_key_uuid": body_api_key_uuid, "name": name, }, - key_update_params.KeyUpdateParams, + openai_update_params.OpenAIUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyUpdateResponse, + cast_to=OpenAIUpdateResponse, ) async def list( @@ -466,7 +466,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: + ) -> OpenAIListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. @@ -497,10 +497,10 @@ async def list( "page": page, "per_page": per_page, }, - key_list_params.KeyListParams, + openai_list_params.OpenAIListParams, ), ), - cast_to=KeyListResponse, + cast_to=OpenAIListResponse, ) async def delete( @@ -513,7 +513,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: + ) -> OpenAIDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to `/v2/gen-ai/openai/keys/{api_key_uuid}`. @@ -536,7 +536,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=KeyDeleteResponse, + cast_to=OpenAIDeleteResponse, ) async def retrieve_agents( @@ -551,7 +551,7 @@ async def retrieve_agents( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: + ) -> OpenAIRetrieveAgentsResponse: """ List Agents by OpenAI Key. @@ -584,104 +584,104 @@ async def retrieve_agents( "page": page, "per_page": per_page, }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, + openai_retrieve_agents_params.OpenAIRetrieveAgentsParams, ), ), - cast_to=KeyRetrieveAgentsResponse, + cast_to=OpenAIRetrieveAgentsResponse, ) -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai self.create = to_raw_response_wrapper( - keys.create, + openai.create, ) self.retrieve = to_raw_response_wrapper( - keys.retrieve, + openai.retrieve, ) self.update = to_raw_response_wrapper( - keys.update, + openai.update, ) self.list = to_raw_response_wrapper( - keys.list, + openai.list, ) self.delete = to_raw_response_wrapper( - keys.delete, + openai.delete, ) self.retrieve_agents = to_raw_response_wrapper( - keys.retrieve_agents, + openai.retrieve_agents, ) -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai self.create = async_to_raw_response_wrapper( - keys.create, + openai.create, ) self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, + openai.retrieve, ) self.update = async_to_raw_response_wrapper( - keys.update, + openai.update, ) self.list = async_to_raw_response_wrapper( - keys.list, + openai.list, ) self.delete = async_to_raw_response_wrapper( - keys.delete, + openai.delete, ) self.retrieve_agents = async_to_raw_response_wrapper( - keys.retrieve_agents, + openai.retrieve_agents, ) -class KeysResourceWithStreamingResponse: - def __init__(self, keys: 
KeysResource) -> None: - self._keys = keys +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai self.create = to_streamed_response_wrapper( - keys.create, + openai.create, ) self.retrieve = to_streamed_response_wrapper( - keys.retrieve, + openai.retrieve, ) self.update = to_streamed_response_wrapper( - keys.update, + openai.update, ) self.list = to_streamed_response_wrapper( - keys.list, + openai.list, ) self.delete = to_streamed_response_wrapper( - keys.delete, + openai.delete, ) self.retrieve_agents = to_streamed_response_wrapper( - keys.retrieve_agents, + openai.retrieve_agents, ) -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai self.create = async_to_streamed_response_wrapper( - keys.create, + openai.create, ) self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, + openai.retrieve, ) self.update = async_to_streamed_response_wrapper( - keys.update, + openai.update, ) self.list = async_to_streamed_response_wrapper( - keys.list, + openai.list, ) self.delete = async_to_streamed_response_wrapper( - keys.delete, + openai.delete, ) self.retrieve_agents = async_to_streamed_response_wrapper( - keys.retrieve_agents, + openai.retrieve_agents, ) diff --git a/src/gradientai/resources/model_providers/model_providers.py b/src/gradientai/resources/models/providers/providers.py similarity index 58% rename from src/gradientai/resources/model_providers/model_providers.py rename to src/gradientai/resources/models/providers/providers.py index cf710ecf..3e3f4dde 100644 --- a/src/gradientai/resources/model_providers/model_providers.py +++ b/src/gradientai/resources/models/providers/providers.py @@ -2,9 +2,7 @@ from __future__ import annotations -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .openai.openai import ( +from .openai import ( OpenAIResource, AsyncOpenAIResource, OpenAIResourceWithRawResponse, @@ -12,7 +10,7 @@ OpenAIResourceWithStreamingResponse, AsyncOpenAIResourceWithStreamingResponse, ) -from .anthropic.anthropic import ( +from .anthropic import ( AnthropicResource, AsyncAnthropicResource, AnthropicResourceWithRawResponse, @@ -20,11 +18,13 @@ AnthropicResourceWithStreamingResponse, AsyncAnthropicResourceWithStreamingResponse, ) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource -__all__ = ["ModelProvidersResource", "AsyncModelProvidersResource"] +__all__ = ["ProvidersResource", "AsyncProvidersResource"] -class ModelProvidersResource(SyncAPIResource): +class ProvidersResource(SyncAPIResource): @cached_property def anthropic(self) -> AnthropicResource: return AnthropicResource(self._client) @@ -34,26 +34,26 @@ def openai(self) -> OpenAIResource: return OpenAIResource(self._client) @cached_property - def with_raw_response(self) -> ModelProvidersResourceWithRawResponse: + def with_raw_response(self) -> ProvidersResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
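The four wrapper classes exist so `.with_raw_response` and `.with_streaming_response` keep working across the rename. Patterned on the test assertions later in this patch:

```python
from gradientai import GradientAI

client = GradientAI()

# Raw access: the body is read eagerly; `.parse()` returns the typed model.
response = client.models.providers.openai.with_raw_response.list()
print(response.http_request.headers.get("X-Stainless-Lang"))  # "python"
keys = response.parse()  # OpenAIListResponse

# Streaming access: the body is not read until you ask for it.
with client.models.providers.openai.with_streaming_response.list() as streamed:
    keys = streamed.parse()
```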
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return ModelProvidersResourceWithRawResponse(self) + return ProvidersResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> ModelProvidersResourceWithStreamingResponse: + def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return ModelProvidersResourceWithStreamingResponse(self) + return ProvidersResourceWithStreamingResponse(self) -class AsyncModelProvidersResource(AsyncAPIResource): +class AsyncProvidersResource(AsyncAPIResource): @cached_property def anthropic(self) -> AsyncAnthropicResource: return AsyncAnthropicResource(self._client) @@ -63,72 +63,72 @@ def openai(self) -> AsyncOpenAIResource: return AsyncOpenAIResource(self._client) @cached_property - def with_raw_response(self) -> AsyncModelProvidersResourceWithRawResponse: + def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ - return AsyncModelProvidersResourceWithRawResponse(self) + return AsyncProvidersResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncModelProvidersResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
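`ProvidersResource` owns no endpoints of its own; it is a namespace whose `anthropic` and `openai` children are built lazily and memoized per instance. A stripped-down sketch of that `cached_property` pattern — the class names here are simplified stand-ins, not the generated code:

```python
from functools import cached_property


class FakeClient:
    """Stand-in for the SDK client object."""


class AnthropicResource:
    def __init__(self, client: FakeClient) -> None:
        self._client = client


class ProvidersResource:
    def __init__(self, client: FakeClient) -> None:
        self._client = client

    @cached_property
    def anthropic(self) -> AnthropicResource:
        # Constructed on first access, then cached on this instance.
        return AnthropicResource(self._client)


providers = ProvidersResource(FakeClient())
assert providers.anthropic is providers.anthropic  # one shared child instance
```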
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ - return AsyncModelProvidersResourceWithStreamingResponse(self) + return AsyncProvidersResourceWithStreamingResponse(self) -class ModelProvidersResourceWithRawResponse: - def __init__(self, model_providers: ModelProvidersResource) -> None: - self._model_providers = model_providers +class ProvidersResourceWithRawResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers @cached_property def anthropic(self) -> AnthropicResourceWithRawResponse: - return AnthropicResourceWithRawResponse(self._model_providers.anthropic) + return AnthropicResourceWithRawResponse(self._providers.anthropic) @cached_property def openai(self) -> OpenAIResourceWithRawResponse: - return OpenAIResourceWithRawResponse(self._model_providers.openai) + return OpenAIResourceWithRawResponse(self._providers.openai) -class AsyncModelProvidersResourceWithRawResponse: - def __init__(self, model_providers: AsyncModelProvidersResource) -> None: - self._model_providers = model_providers +class AsyncProvidersResourceWithRawResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers @cached_property def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: - return AsyncAnthropicResourceWithRawResponse(self._model_providers.anthropic) + return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) @cached_property def openai(self) -> AsyncOpenAIResourceWithRawResponse: - return AsyncOpenAIResourceWithRawResponse(self._model_providers.openai) + return AsyncOpenAIResourceWithRawResponse(self._providers.openai) -class ModelProvidersResourceWithStreamingResponse: - def __init__(self, model_providers: ModelProvidersResource) -> None: - self._model_providers = model_providers +class ProvidersResourceWithStreamingResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers @cached_property def anthropic(self) -> AnthropicResourceWithStreamingResponse: - return AnthropicResourceWithStreamingResponse(self._model_providers.anthropic) + return AnthropicResourceWithStreamingResponse(self._providers.anthropic) @cached_property def openai(self) -> OpenAIResourceWithStreamingResponse: - return OpenAIResourceWithStreamingResponse(self._model_providers.openai) + return OpenAIResourceWithStreamingResponse(self._providers.openai) -class AsyncModelProvidersResourceWithStreamingResponse: - def __init__(self, model_providers: AsyncModelProvidersResource) -> None: - self._model_providers = model_providers +class AsyncProvidersResourceWithStreamingResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers @cached_property def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: - return AsyncAnthropicResourceWithStreamingResponse(self._model_providers.anthropic) + return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) @cached_property def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: - return AsyncOpenAIResourceWithStreamingResponse(self._model_providers.openai) + return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git a/src/gradientai/types/model_providers/anthropic/__init__.py b/src/gradientai/types/model_providers/anthropic/__init__.py deleted file mode 100644 index eb47e709..00000000 --- a/src/gradientai/types/model_providers/anthropic/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File 
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams -from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/gradientai/types/model_providers/openai/__init__.py b/src/gradientai/types/model_providers/openai/__init__.py deleted file mode 100644 index 70abf332..00000000 --- a/src/gradientai/types/model_providers/openai/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams -from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/gradientai/types/model_providers/__init__.py b/src/gradientai/types/models/__init__.py similarity index 100% rename from src/gradientai/types/model_providers/__init__.py rename to src/gradientai/types/models/__init__.py diff --git a/src/gradientai/types/models/providers/__init__.py b/src/gradientai/types/models/providers/__init__.py new file mode 100644 index 00000000..74366e70 --- /dev/null +++ b/src/gradientai/types/models/providers/__init__.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
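The `__init__.py` beginning here (its imports follow just below) re-exports every provider type with the redundant `Name as Name` idiom, which marks the names as intentional re-exports for strict type checkers. Once this lands, both the flat and the deep import paths should resolve to the same class; the flat path is the one the test files in this patch use:

```python
# One flat namespace for all provider key types, per the new __init__.
from gradientai.types.models.providers import (
    AnthropicCreateResponse,
    OpenAIListResponse,
)

# The deep module paths keep working; the __init__ only adds re-exports.
from gradientai.types.models.providers.anthropic_create_response import (
    AnthropicCreateResponse as DeepAnthropicCreateResponse,
)

assert AnthropicCreateResponse is DeepAnthropicCreateResponse
```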
+ +from __future__ import annotations + +from .openai_list_params import OpenAIListParams as OpenAIListParams +from .openai_create_params import OpenAICreateParams as OpenAICreateParams +from .openai_list_response import OpenAIListResponse as OpenAIListResponse +from .openai_update_params import OpenAIUpdateParams as OpenAIUpdateParams +from .anthropic_list_params import AnthropicListParams as AnthropicListParams +from .openai_create_response import OpenAICreateResponse as OpenAICreateResponse +from .openai_delete_response import OpenAIDeleteResponse as OpenAIDeleteResponse +from .openai_update_response import OpenAIUpdateResponse as OpenAIUpdateResponse +from .anthropic_create_params import AnthropicCreateParams as AnthropicCreateParams +from .anthropic_list_response import AnthropicListResponse as AnthropicListResponse +from .anthropic_update_params import AnthropicUpdateParams as AnthropicUpdateParams +from .openai_retrieve_response import OpenAIRetrieveResponse as OpenAIRetrieveResponse +from .anthropic_create_response import AnthropicCreateResponse as AnthropicCreateResponse +from .anthropic_delete_response import AnthropicDeleteResponse as AnthropicDeleteResponse +from .anthropic_update_response import AnthropicUpdateResponse as AnthropicUpdateResponse +from .anthropic_retrieve_response import AnthropicRetrieveResponse as AnthropicRetrieveResponse +from .anthropic_list_agents_params import AnthropicListAgentsParams as AnthropicListAgentsParams +from .openai_retrieve_agents_params import OpenAIRetrieveAgentsParams as OpenAIRetrieveAgentsParams +from .anthropic_list_agents_response import AnthropicListAgentsResponse as AnthropicListAgentsResponse +from .openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse as OpenAIRetrieveAgentsResponse diff --git a/src/gradientai/types/model_providers/openai/key_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py similarity index 68% rename from src/gradientai/types/model_providers/openai/key_create_params.py rename to src/gradientai/types/models/providers/anthropic_create_params.py index 389f167c..b624121f 100644 --- a/src/gradientai/types/model_providers/openai/key_create_params.py +++ b/src/gradientai/types/models/providers/anthropic_create_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyCreateParams"] +__all__ = ["AnthropicCreateParams"] -class KeyCreateParams(TypedDict, total=False): +class AnthropicCreateParams(TypedDict, total=False): api_key: str name: str diff --git a/src/gradientai/types/model_providers/anthropic/key_update_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py similarity index 77% rename from src/gradientai/types/model_providers/anthropic/key_update_response.py rename to src/gradientai/types/models/providers/anthropic_create_response.py index b04277a6..f0b8d2d1 100644 --- a/src/gradientai/types/model_providers/anthropic/key_update_response.py +++ b/src/gradientai/types/models/providers/anthropic_create_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo -__all__ = ["KeyUpdateResponse"] +__all__ = ["AnthropicCreateResponse"] -class KeyUpdateResponse(BaseModel): +class AnthropicCreateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_create_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py similarity index 77% rename from 
src/gradientai/types/model_providers/anthropic/key_create_response.py rename to src/gradientai/types/models/providers/anthropic_delete_response.py index a032810c..a3842bbc 100644 --- a/src/gradientai/types/model_providers/anthropic/key_create_response.py +++ b/src/gradientai/types/models/providers/anthropic_delete_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo -__all__ = ["KeyCreateResponse"] +__all__ = ["AnthropicDeleteResponse"] -class KeyCreateResponse(BaseModel): +class AnthropicDeleteResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/openai/key_retrieve_agents_params.py b/src/gradientai/types/models/providers/anthropic_list_agents_params.py similarity index 71% rename from src/gradientai/types/model_providers/openai/key_retrieve_agents_params.py rename to src/gradientai/types/models/providers/anthropic_list_agents_params.py index ec745d14..1a5b8229 100644 --- a/src/gradientai/types/model_providers/openai/key_retrieve_agents_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyRetrieveAgentsParams"] +__all__ = ["AnthropicListAgentsParams"] -class KeyRetrieveAgentsParams(TypedDict, total=False): +class AnthropicListAgentsParams(TypedDict, total=False): page: int """page number.""" diff --git a/src/gradientai/types/model_providers/anthropic/key_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py similarity index 83% rename from src/gradientai/types/model_providers/anthropic/key_list_agents_response.py rename to src/gradientai/types/models/providers/anthropic_list_agents_response.py index c9e74cf7..6816f0db 100644 --- a/src/gradientai/types/model_providers/anthropic/key_list_agents_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_response.py @@ -8,10 +8,10 @@ from ...shared.api_meta import APIMeta from ...shared.api_links import APILinks -__all__ = ["KeyListAgentsResponse"] +__all__ = ["AnthropicListAgentsResponse"] -class KeyListAgentsResponse(BaseModel): +class AnthropicListAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py similarity index 74% rename from src/gradientai/types/model_providers/anthropic/key_list_params.py rename to src/gradientai/types/models/providers/anthropic_list_params.py index a11458ad..de8ce520 100644 --- a/src/gradientai/types/model_providers/anthropic/key_list_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyListParams"] +__all__ = ["AnthropicListParams"] -class KeyListParams(TypedDict, total=False): +class AnthropicListParams(TypedDict, total=False): page: int """page number.""" diff --git a/src/gradientai/types/model_providers/anthropic/key_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py similarity index 85% rename from src/gradientai/types/model_providers/anthropic/key_list_response.py rename to src/gradientai/types/models/providers/anthropic_list_response.py index e3e3e5ef..77999f5b 100644 --- a/src/gradientai/types/model_providers/anthropic/key_list_response.py +++ 
b/src/gradientai/types/models/providers/anthropic_list_response.py @@ -7,10 +7,10 @@ from ...shared.api_links import APILinks from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo -__all__ = ["KeyListResponse"] +__all__ = ["AnthropicListResponse"] -class KeyListResponse(BaseModel): +class AnthropicListResponse(BaseModel): api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py similarity index 76% rename from src/gradientai/types/model_providers/anthropic/key_retrieve_response.py rename to src/gradientai/types/models/providers/anthropic_retrieve_response.py index b8361fc2..7083b75f 100644 --- a/src/gradientai/types/model_providers/anthropic/key_retrieve_response.py +++ b/src/gradientai/types/models/providers/anthropic_retrieve_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo -__all__ = ["KeyRetrieveResponse"] +__all__ = ["AnthropicRetrieveResponse"] -class KeyRetrieveResponse(BaseModel): +class AnthropicRetrieveResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/openai/key_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py similarity index 78% rename from src/gradientai/types/model_providers/openai/key_update_params.py rename to src/gradientai/types/models/providers/anthropic_update_params.py index c07d7f66..7bb03045 100644 --- a/src/gradientai/types/model_providers/openai/key_update_params.py +++ b/src/gradientai/types/models/providers/anthropic_update_params.py @@ -6,10 +6,10 @@ from ...._utils import PropertyInfo -__all__ = ["KeyUpdateParams"] +__all__ = ["AnthropicUpdateParams"] -class KeyUpdateParams(TypedDict, total=False): +class AnthropicUpdateParams(TypedDict, total=False): api_key: str body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] diff --git a/src/gradientai/types/model_providers/anthropic/key_delete_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py similarity index 77% rename from src/gradientai/types/model_providers/anthropic/key_delete_response.py rename to src/gradientai/types/models/providers/anthropic_update_response.py index 2afe2dda..d3b2911b 100644 --- a/src/gradientai/types/model_providers/anthropic/key_delete_response.py +++ b/src/gradientai/types/models/providers/anthropic_update_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo -__all__ = ["KeyDeleteResponse"] +__all__ = ["AnthropicUpdateResponse"] -class KeyDeleteResponse(BaseModel): +class AnthropicUpdateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py similarity index 70% rename from src/gradientai/types/model_providers/anthropic/key_create_params.py rename to src/gradientai/types/models/providers/openai_create_params.py index 389f167c..da655d75 100644 --- a/src/gradientai/types/model_providers/anthropic/key_create_params.py +++ b/src/gradientai/types/models/providers/openai_create_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyCreateParams"] +__all__ = ["OpenAICreateParams"] -class 
KeyCreateParams(TypedDict, total=False): +class OpenAICreateParams(TypedDict, total=False): api_key: str name: str diff --git a/src/gradientai/types/model_providers/openai/key_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py similarity index 78% rename from src/gradientai/types/model_providers/openai/key_create_response.py rename to src/gradientai/types/models/providers/openai_create_response.py index f3b4d36c..4908a91a 100644 --- a/src/gradientai/types/model_providers/openai/key_create_response.py +++ b/src/gradientai/types/models/providers/openai_create_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo -__all__ = ["KeyCreateResponse"] +__all__ = ["OpenAICreateResponse"] -class KeyCreateResponse(BaseModel): +class OpenAICreateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/openai/key_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py similarity index 78% rename from src/gradientai/types/model_providers/openai/key_delete_response.py rename to src/gradientai/types/models/providers/openai_delete_response.py index 0c8922bb..080a251f 100644 --- a/src/gradientai/types/model_providers/openai/key_delete_response.py +++ b/src/gradientai/types/models/providers/openai_delete_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo -__all__ = ["KeyDeleteResponse"] +__all__ = ["OpenAIDeleteResponse"] -class KeyDeleteResponse(BaseModel): +class OpenAIDeleteResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/openai/key_list_params.py b/src/gradientai/types/models/providers/openai_list_params.py similarity index 75% rename from src/gradientai/types/model_providers/openai/key_list_params.py rename to src/gradientai/types/models/providers/openai_list_params.py index a11458ad..e5b86b8d 100644 --- a/src/gradientai/types/model_providers/openai/key_list_params.py +++ b/src/gradientai/types/models/providers/openai_list_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyListParams"] +__all__ = ["OpenAIListParams"] -class KeyListParams(TypedDict, total=False): +class OpenAIListParams(TypedDict, total=False): page: int """page number.""" diff --git a/src/gradientai/types/model_providers/openai/key_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py similarity index 86% rename from src/gradientai/types/model_providers/openai/key_list_response.py rename to src/gradientai/types/models/providers/openai_list_response.py index 362b5dd6..edbd9fb4 100644 --- a/src/gradientai/types/model_providers/openai/key_list_response.py +++ b/src/gradientai/types/models/providers/openai_list_response.py @@ -7,10 +7,10 @@ from ...shared.api_links import APILinks from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo -__all__ = ["KeyListResponse"] +__all__ = ["OpenAIListResponse"] -class KeyListResponse(BaseModel): +class OpenAIListResponse(BaseModel): api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_list_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py similarity index 71% rename from src/gradientai/types/model_providers/anthropic/key_list_agents_params.py rename to 
src/gradientai/types/models/providers/openai_retrieve_agents_params.py index ebbc3b7e..8a41eaf9 100644 --- a/src/gradientai/types/model_providers/anthropic/key_list_agents_params.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["KeyListAgentsParams"] +__all__ = ["OpenAIRetrieveAgentsParams"] -class KeyListAgentsParams(TypedDict, total=False): +class OpenAIRetrieveAgentsParams(TypedDict, total=False): page: int """page number.""" diff --git a/src/gradientai/types/model_providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py similarity index 82% rename from src/gradientai/types/model_providers/openai/key_retrieve_agents_response.py rename to src/gradientai/types/models/providers/openai_retrieve_agents_response.py index 56808bac..b3166636 100644 --- a/src/gradientai/types/model_providers/openai/key_retrieve_agents_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py @@ -8,10 +8,10 @@ from ...shared.api_meta import APIMeta from ...shared.api_links import APILinks -__all__ = ["KeyRetrieveAgentsResponse"] +__all__ = ["OpenAIRetrieveAgentsResponse"] -class KeyRetrieveAgentsResponse(BaseModel): +class OpenAIRetrieveAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/model_providers/openai/key_update_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py similarity index 77% rename from src/gradientai/types/model_providers/openai/key_update_response.py rename to src/gradientai/types/models/providers/openai_retrieve_response.py index 4889f994..ef23b966 100644 --- a/src/gradientai/types/model_providers/openai/key_update_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_response.py @@ -5,8 +5,8 @@ from ...._models import BaseModel from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo -__all__ = ["KeyUpdateResponse"] +__all__ = ["OpenAIRetrieveResponse"] -class KeyUpdateResponse(BaseModel): +class OpenAIRetrieveResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/model_providers/anthropic/key_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py similarity index 79% rename from src/gradientai/types/model_providers/anthropic/key_update_params.py rename to src/gradientai/types/models/providers/openai_update_params.py index c07d7f66..ab5d02cf 100644 --- a/src/gradientai/types/model_providers/anthropic/key_update_params.py +++ b/src/gradientai/types/models/providers/openai_update_params.py @@ -6,10 +6,10 @@ from ...._utils import PropertyInfo -__all__ = ["KeyUpdateParams"] +__all__ = ["OpenAIUpdateParams"] -class KeyUpdateParams(TypedDict, total=False): +class OpenAIUpdateParams(TypedDict, total=False): api_key: str body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] diff --git a/src/gradientai/types/model_providers/openai/key_retrieve_response.py b/src/gradientai/types/models/providers/openai_update_response.py similarity index 78% rename from src/gradientai/types/model_providers/openai/key_retrieve_response.py rename to src/gradientai/types/models/providers/openai_update_response.py index 7015b6f7..9bb80518 100644 --- a/src/gradientai/types/model_providers/openai/key_retrieve_response.py +++ b/src/gradientai/types/models/providers/openai_update_response.py @@ -5,8 +5,8 @@ 
from ...._models import BaseModel from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo -__all__ = ["KeyRetrieveResponse"] +__all__ = ["OpenAIUpdateResponse"] -class KeyRetrieveResponse(BaseModel): +class OpenAIUpdateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/tests/api_resources/model_providers/openai/__init__.py b/tests/api_resources/model_providers/openai/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/model_providers/openai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/model_providers/__init__.py b/tests/api_resources/models/__init__.py similarity index 100% rename from tests/api_resources/model_providers/__init__.py rename to tests/api_resources/models/__init__.py diff --git a/tests/api_resources/model_providers/anthropic/__init__.py b/tests/api_resources/models/providers/__init__.py similarity index 100% rename from tests/api_resources/model_providers/anthropic/__init__.py rename to tests/api_resources/models/providers/__init__.py diff --git a/tests/api_resources/model_providers/anthropic/test_keys.py b/tests/api_resources/models/providers/test_anthropic.py similarity index 59% rename from tests/api_resources/model_providers/anthropic/test_keys.py rename to tests/api_resources/models/providers/test_anthropic.py index fd4ffb0f..79bfcdc3 100644 --- a/tests/api_resources/model_providers/anthropic/test_keys.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -9,89 +9,89 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.model_providers.anthropic import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyListAgentsResponse, +from gradientai.types.models.providers import ( + AnthropicListResponse, + AnthropicCreateResponse, + AnthropicDeleteResponse, + AnthropicUpdateResponse, + AnthropicRetrieveResponse, + AnthropicListAgentsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestKeys: +class TestAnthropic: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = client.models.providers.anthropic.create() + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.create( + anthropic = client.models.providers.anthropic.create( api_key="api_key", name="name", ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.create() + response = client.models.providers.anthropic.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = response.parse() + 
assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.create() as response: + with client.models.providers.anthropic.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.retrieve( + anthropic = client.models.providers.anthropic.retrieve( "api_key_uuid", ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.retrieve( + response = client.models.providers.anthropic.with_raw_response.retrieve( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.retrieve( + with client.models.providers.anthropic.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -99,52 +99,52 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.model_providers.anthropic.keys.with_raw_response.retrieve( + client.models.providers.anthropic.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.update( + anthropic = client.models.providers.anthropic.update( path_api_key_uuid="api_key_uuid", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.update( + anthropic = client.models.providers.anthropic.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", name="name", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) 
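The test rename is mechanical: `TestKeys` becomes `TestAnthropic`, the local variable `key` becomes `anthropic`, and type imports move to `gradientai.types.models.providers`. A rough sketch of a new test in the same style, against the mock-server convention above (`TEST_API_BASE_URL`, defaulting to `http://127.0.0.1:4010`); the real suite builds clients through fixtures, so the constructor kwargs here are assumptions:

```python
import os

from gradientai import GradientAI
from tests.utils import assert_matches_type
from gradientai.types.models.providers import AnthropicListResponse

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


class TestAnthropicList:
    def test_method_list(self) -> None:
        # Hypothetical direct construction; the suite's fixtures normally
        # handle base_url/auth wiring, so these kwargs are an assumption.
        client = GradientAI(base_url=base_url, api_key="test-key")
        anthropic = client.models.providers.anthropic.list(page=1, per_page=10)
        assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
```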
@pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.update( + response = client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.update( + with client.models.providers.anthropic.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -152,78 +152,78 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.model_providers.anthropic.keys.with_raw_response.update( + client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = client.models.providers.anthropic.list() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.list( + anthropic = client.models.providers.anthropic.list( page=0, per_page=0, ) - assert_matches_type(KeyListResponse, key, path=["response"]) + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.list() + response = client.models.providers.anthropic.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.list() as response: + with client.models.providers.anthropic.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() 
@parametrize def test_method_delete(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.delete( + anthropic = client.models.providers.anthropic.delete( "api_key_uuid", ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.delete( + response = client.models.providers.anthropic.with_raw_response.delete( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.delete( + with client.models.providers.anthropic.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -231,51 +231,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.model_providers.anthropic.keys.with_raw_response.delete( + client.models.providers.anthropic.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_list_agents(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.list_agents( + anthropic = client.models.providers.anthropic.list_agents( uuid="uuid", ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.anthropic.keys.list_agents( + anthropic = client.models.providers.anthropic.list_agents( uuid="uuid", page=0, per_page=0, ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list_agents(self, client: GradientAI) -> None: - response = client.model_providers.anthropic.keys.with_raw_response.list_agents( + response = client.models.providers.anthropic.with_raw_response.list_agents( uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list_agents(self, client: GradientAI) -> None: - with client.model_providers.anthropic.keys.with_streaming_response.list_agents( + with 
client.models.providers.anthropic.with_streaming_response.list_agents( uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + anthropic = response.parse() + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -283,12 +283,12 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None: @parametrize def test_path_params_list_agents(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.model_providers.anthropic.keys.with_raw_response.list_agents( + client.models.providers.anthropic.with_raw_response.list_agents( uuid="", ) -class TestAsyncKeys: +class TestAsyncAnthropic: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) @@ -296,71 +296,71 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = await async_client.models.providers.anthropic.create() + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.create( + anthropic = await async_client.models.providers.anthropic.create( api_key="api_key", name="name", ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.create() + response = await async_client.models.providers.anthropic.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.create() as response: + async with async_client.models.providers.anthropic.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.retrieve( + anthropic = await async_client.models.providers.anthropic.retrieve( "api_key_uuid", ) - assert_matches_type(KeyRetrieveResponse, key, 
path=["response"]) + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.retrieve( + response = await async_client.models.providers.anthropic.with_raw_response.retrieve( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.retrieve( + async with async_client.models.providers.anthropic.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -368,52 +368,52 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.model_providers.anthropic.keys.with_raw_response.retrieve( + await async_client.models.providers.anthropic.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.update( + anthropic = await async_client.models.providers.anthropic.update( path_api_key_uuid="api_key_uuid", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.update( + anthropic = await async_client.models.providers.anthropic.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", name="name", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.update( + response = await async_client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: 
AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.update( + async with async_client.models.providers.anthropic.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -421,78 +421,78 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.model_providers.anthropic.keys.with_raw_response.update( + await async_client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = await async_client.models.providers.anthropic.list() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.list( + anthropic = await async_client.models.providers.anthropic.list( page=0, per_page=0, ) - assert_matches_type(KeyListResponse, key, path=["response"]) + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.list() + response = await async_client.models.providers.anthropic.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.list() as response: + async with async_client.models.providers.anthropic.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.delete( + anthropic = await async_client.models.providers.anthropic.delete( "api_key_uuid", ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) 
@pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.delete( + response = await async_client.models.providers.anthropic.with_raw_response.delete( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.delete( + async with async_client.models.providers.anthropic.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -500,51 +500,51 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.model_providers.anthropic.keys.with_raw_response.delete( + await async_client.models.providers.anthropic.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.list_agents( + anthropic = await async_client.models.providers.anthropic.list_agents( uuid="uuid", ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.anthropic.keys.list_agents( + anthropic = await async_client.models.providers.anthropic.list_agents( uuid="uuid", page=0, per_page=0, ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.anthropic.keys.with_raw_response.list_agents( + response = await async_client.models.providers.anthropic.with_raw_response.list_agents( uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.anthropic.keys.with_streaming_response.list_agents( + async with 
async_client.models.providers.anthropic.with_streaming_response.list_agents( uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + anthropic = await response.parse() + assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) assert cast(Any, response.is_closed) is True @@ -552,6 +552,6 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA @parametrize async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.model_providers.anthropic.keys.with_raw_response.list_agents( + await async_client.models.providers.anthropic.with_raw_response.list_agents( uuid="", ) diff --git a/tests/api_resources/model_providers/openai/test_keys.py b/tests/api_resources/models/providers/test_openai.py similarity index 62% rename from tests/api_resources/model_providers/openai/test_keys.py rename to tests/api_resources/models/providers/test_openai.py index f0f1eda0..2640601e 100644 --- a/tests/api_resources/model_providers/openai/test_keys.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -9,89 +9,89 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.model_providers.openai import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyRetrieveAgentsResponse, +from gradientai.types.models.providers import ( + OpenAIListResponse, + OpenAICreateResponse, + OpenAIDeleteResponse, + OpenAIUpdateResponse, + OpenAIRetrieveResponse, + OpenAIRetrieveAgentsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestKeys: +class TestOpenAI: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = client.models.providers.openai.create() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.create( + openai = client.models.providers.openai.create( api_key="api_key", name="name", ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.model_providers.openai.keys.with_raw_response.create() + response = client.models.providers.openai.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.create() as response: + with 
client.models.providers.openai.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.retrieve( + openai = client.models.providers.openai.retrieve( "api_key_uuid", ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.model_providers.openai.keys.with_raw_response.retrieve( + response = client.models.providers.openai.with_raw_response.retrieve( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.retrieve( + with client.models.providers.openai.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -99,52 +99,52 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.model_providers.openai.keys.with_raw_response.retrieve( + client.models.providers.openai.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.update( + openai = client.models.providers.openai.update( path_api_key_uuid="api_key_uuid", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.update( + openai = client.models.providers.openai.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", name="name", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.model_providers.openai.keys.with_raw_response.update( + response = client.models.providers.openai.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.update( + with client.models.providers.openai.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -152,78 +152,78 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.model_providers.openai.keys.with_raw_response.update( + client.models.providers.openai.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = client.models.providers.openai.list() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.list( + openai = client.models.providers.openai.list( page=0, per_page=0, ) - assert_matches_type(KeyListResponse, key, path=["response"]) + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.model_providers.openai.keys.with_raw_response.list() + response = client.models.providers.openai.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.list() as response: + with client.models.providers.openai.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.delete( + openai = client.models.providers.openai.delete( "api_key_uuid", ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = 
client.model_providers.openai.keys.with_raw_response.delete( + response = client.models.providers.openai.with_raw_response.delete( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.delete( + with client.models.providers.openai.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -231,51 +231,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.model_providers.openai.keys.with_raw_response.delete( + client.models.providers.openai.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_retrieve_agents(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.retrieve_agents( + openai = client.models.providers.openai.retrieve_agents( uuid="uuid", ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: - key = client.model_providers.openai.keys.retrieve_agents( + openai = client.models.providers.openai.retrieve_agents( uuid="uuid", page=0, per_page=0, ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: - response = client.model_providers.openai.keys.with_raw_response.retrieve_agents( + response = client.models.providers.openai.with_raw_response.retrieve_agents( uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: - with client.model_providers.openai.keys.with_streaming_response.retrieve_agents( + with client.models.providers.openai.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + openai = response.parse() + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -283,12 +283,12 @@ def 
test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve_agents(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.model_providers.openai.keys.with_raw_response.retrieve_agents( + client.models.providers.openai.with_raw_response.retrieve_agents( uuid="", ) -class TestAsyncKeys: +class TestAsyncOpenAI: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) @@ -296,71 +296,71 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = await async_client.models.providers.openai.create() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.create( + openai = await async_client.models.providers.openai.create( api_key="api_key", name="name", ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.create() + response = await async_client.models.providers.openai.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.openai.keys.with_streaming_response.create() as response: + async with async_client.models.providers.openai.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAICreateResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.retrieve( + openai = await async_client.models.providers.openai.retrieve( "api_key_uuid", ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.retrieve( + response = await async_client.models.providers.openai.with_raw_response.retrieve( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - 
assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.openai.keys.with_streaming_response.retrieve( + async with async_client.models.providers.openai.with_streaming_response.retrieve( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -368,52 +368,52 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.model_providers.openai.keys.with_raw_response.retrieve( + await async_client.models.providers.openai.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.update( + openai = await async_client.models.providers.openai.update( path_api_key_uuid="api_key_uuid", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.update( + openai = await async_client.models.providers.openai.update( path_api_key_uuid="api_key_uuid", api_key="api_key", body_api_key_uuid="api_key_uuid", name="name", ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.update( + response = await async_client.models.providers.openai.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.openai.keys.with_streaming_response.update( + async with async_client.models.providers.openai.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -421,78 +421,78 @@ async def 
test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.model_providers.openai.keys.with_raw_response.update( + await async_client.models.providers.openai.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = await async_client.models.providers.openai.list() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.list( + openai = await async_client.models.providers.openai.list( page=0, per_page=0, ) - assert_matches_type(KeyListResponse, key, path=["response"]) + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.list() + response = await async_client.models.providers.openai.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.openai.keys.with_streaming_response.list() as response: + async with async_client.models.providers.openai.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIListResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.delete( + openai = await async_client.models.providers.openai.delete( "api_key_uuid", ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.delete( + response = await async_client.models.providers.openai.with_raw_response.delete( "api_key_uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - 
async with async_client.model_providers.openai.keys.with_streaming_response.delete( + async with async_client.models.providers.openai.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -500,51 +500,51 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.model_providers.openai.keys.with_raw_response.delete( + await async_client.models.providers.openai.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.retrieve_agents( + openai = await async_client.models.providers.openai.retrieve_agents( uuid="uuid", ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.model_providers.openai.keys.retrieve_agents( + openai = await async_client.models.providers.openai.retrieve_agents( uuid="uuid", page=0, per_page=0, ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - response = await async_client.model_providers.openai.keys.with_raw_response.retrieve_agents( + response = await async_client.models.providers.openai.with_raw_response.retrieve_agents( uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: - async with async_client.model_providers.openai.keys.with_streaming_response.retrieve_agents( + async with async_client.models.providers.openai.with_streaming_response.retrieve_agents( uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + openai = await response.parse() + assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) assert cast(Any, response.is_closed) is True @@ -552,6 +552,6 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradi @parametrize async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await 
async_client.model_providers.openai.keys.with_raw_response.retrieve_agents( + await async_client.models.providers.openai.with_raw_response.retrieve_agents( uuid="", ) From 0693cbb76358784dbdb9013143aa986d646f7f3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 28 Jun 2025 17:19:02 +0000 Subject: [PATCH 098/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3b005e52..ee49ac2d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.10" + ".": "0.1.0-alpha.11" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 807b47af..8e835157 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index f1fdf3c0..5c407722 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradientai" -__version__ = "0.1.0-alpha.10" # x-release-please-version +__version__ = "0.1.0-alpha.11" # x-release-please-version From b54ac44972789de60c1da8c04c4baef1f3297c04 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 02:16:42 +0000 Subject: [PATCH 099/200] chore(internal): codegen related update --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6bfd00b1..aef77646 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,6 +17,7 @@ jobs: timeout-minutes: 10 name: lint runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -42,6 +43,7 @@ jobs: contents: read id-token: write runs-on: depot-ubuntu-24.04 + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -62,6 +64,7 @@ jobs: timeout-minutes: 10 name: test runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 From 6d9d9f9e923cefd0fc9bbb4050c88fc9a6e0ac20 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 02:34:56 +0000 Subject: [PATCH 100/200] fix(ci): correct conditional --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aef77646..757c3196 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,14 +36,13 @@ jobs: run: ./scripts/lint upload: - if: github.repository == 'stainless-sdks/gradientai-python' + if: github.repository == 'stainless-sdks/gradientai-python' && (github.event_name == 
'push' || github.event.pull_request.head.repo.fork) timeout-minutes: 10 name: upload permissions: contents: read id-token: write runs-on: depot-ubuntu-24.04 - if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 From 32d5829f86c285d99b58e5167ecfea8728c1d669 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 05:22:35 +0000 Subject: [PATCH 101/200] chore(ci): change upload type --- .github/workflows/ci.yml | 18 ++++++++++++++++-- scripts/utils/upload-artifact.sh | 12 +++++++----- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 757c3196..3acbc370 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,10 +35,10 @@ jobs: - name: Run lints run: ./scripts/lint - upload: + build: if: github.repository == 'stainless-sdks/gradientai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) timeout-minutes: 10 - name: upload + name: build permissions: contents: read id-token: write @@ -46,6 +46,20 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run build + run: rye build + - name: Get GitHub OIDC Token id: github-oidc uses: actions/github-script@v6 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index eb717c71..170e8cfe 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash set -exuo pipefail -RESPONSE=$(curl -X POST "$URL" \ +FILENAME=$(basename dist/*.whl) + +RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \ -H "Authorization: Bearer $AUTH" \ -H "Content-Type: application/json") @@ -12,13 +14,13 @@ if [[ "$SIGNED_URL" == "null" ]]; then exit 1 fi -UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \ - -H "Content-Type: application/gzip" \ - --data-binary @- "$SIGNED_URL" 2>&1) +UPLOAD_RESPONSE=$(curl -v -X PUT \ + -H "Content-Type: binary/octet-stream" \ + --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1) if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/gradientai-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/gradientai-python/$SHA/$FILENAME'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 0b48fa594e15ad733ce1520283faaabcec015f3d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 06:57:08 +0000 Subject: [PATCH 102/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ee49ac2d..fd0ccba9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.11" + ".": "0.1.0-alpha.12" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 8e835157..2a34f8a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.11" +version = "0.1.0-alpha.12" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index 5c407722..0084d0f3 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradientai" -__version__ = "0.1.0-alpha.11" # x-release-please-version +__version__ = "0.1.0-alpha.12" # x-release-please-version From 547c18abe015819b2e3e9400f108fb5fcc33a3a6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 07:37:47 +0000 Subject: [PATCH 103/200] feat(api): manual updates Add GRADIENTAI_AGENT_KEY --- .stats.yml | 2 +- src/gradientai/_client.py | 18 +++++ tests/conftest.py | 8 +- tests/test_client.py | 149 +++++++++++++++++++++++++++++++++----- 4 files changed, 157 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0a6b7a71..9ebb83f9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 1c936b3bd798c3fcb25479b19efa999a +config_hash: 9c2e548d86a376bc5f6c458de6944504 diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 2dc19e49..c696258b 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -56,6 +56,7 @@ class GradientAI(SyncAPIClient): # client options api_key: str | None inference_key: str | None + agent_key: str | None agent_domain: str | None def __init__( @@ -63,6 +64,7 @@ def __init__( *, api_key: str | None = None, inference_key: str | None = None, + agent_key: str | None = None, agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, @@ -88,6 +90,7 @@ def __init__( This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `GRADIENTAI_API_KEY` - `inference_key` from `GRADIENTAI_INFERENCE_KEY` + - `agent_key` from `GRADIENTAI_AGENT_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") @@ -97,6 +100,10 @@ def __init__( inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key + if agent_key is None: + agent_key = os.environ.get("GRADIENTAI_AGENT_KEY") + self.agent_key = agent_key + self.agent_domain = agent_domain if base_url is None: @@ -200,6 +207,7 @@ def copy( *, api_key: str | None = None, inference_key: str | None = None, + agent_key: str | None = None, agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -236,6 +244,7 @@ def copy( client = self.__class__( api_key=api_key or self.api_key, inference_key=inference_key or self.inference_key, + agent_key=agent_key or self.agent_key, agent_domain=agent_domain or self.agent_domain, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -290,6 +299,7 @@ class AsyncGradientAI(AsyncAPIClient): # client options api_key: str | None inference_key: str | None + agent_key: str | None agent_domain: str | None def __init__( @@ -297,6 +307,7 @@ def __init__( *, api_key: str | None = None, inference_key: str | None = None, + agent_key: str | None = None, agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, @@ -322,6 +333,7 @@ def __init__( This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from 
`GRADIENTAI_API_KEY` - `inference_key` from `GRADIENTAI_INFERENCE_KEY` + - `agent_key` from `GRADIENTAI_AGENT_KEY` """ if api_key is None: api_key = os.environ.get("GRADIENTAI_API_KEY") @@ -331,6 +343,10 @@ def __init__( inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") self.inference_key = inference_key + if agent_key is None: + agent_key = os.environ.get("GRADIENTAI_AGENT_KEY") + self.agent_key = agent_key + self.agent_domain = agent_domain if base_url is None: @@ -434,6 +450,7 @@ def copy( *, api_key: str | None = None, inference_key: str | None = None, + agent_key: str | None = None, agent_domain: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -470,6 +487,7 @@ def copy( client = self.__class__( api_key=api_key or self.api_key, inference_key=inference_key or self.inference_key, + agent_key=agent_key or self.agent_key, agent_domain=agent_domain or self.agent_domain, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, diff --git a/tests/conftest.py b/tests/conftest.py index 39547c5d..5b24e1c2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,6 +47,7 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: api_key = "My API Key" inference_key = "My Inference Key" +agent_key = "My Agent Key" @pytest.fixture(scope="session") @@ -56,7 +57,11 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]: raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") with GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=strict + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=strict, ) as client: yield client @@ -85,6 +90,7 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=strict, http_client=http_client, ) as client: diff --git a/tests/test_client.py b/tests/test_client.py index 16220895..c901e2c8 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -40,6 +40,7 @@ base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") api_key = "My API Key" inference_key = "My Inference Key" +agent_key = "My Agent Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: @@ -62,7 +63,11 @@ def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int: class TestGradientAI: client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) @pytest.mark.respx(base_url=base_url) @@ -97,6 +102,10 @@ def test_copy(self) -> None: assert copied.inference_key == "another My Inference Key" assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(agent_key="another My Agent Key") + assert copied.agent_key == "another My Agent Key" + assert self.client.agent_key == "My Agent Key" + def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -118,6 +127,7 @@ def test_copy_default_headers(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, 
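A minimal sketch of what the `agent_key` plumbing in this patch enables once applied. Resolution mirrors the existing credentials: an explicit argument wins, otherwise the client falls back to the `GRADIENTAI_AGENT_KEY` environment variable. The key values below are placeholders.

```python
import os

from gradientai import GradientAI

# An explicit agent_key takes precedence; otherwise GRADIENTAI_AGENT_KEY is read.
client = GradientAI(
    api_key=os.environ.get("GRADIENTAI_API_KEY"),
    inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"),
    agent_key=os.environ.get("GRADIENTAI_AGENT_KEY"),
)

# copy() accepts the new option as well and leaves the source client unchanged.
scoped = client.copy(agent_key="another-agent-key")
print(scoped.agent_key)  # "another-agent-key"; client.agent_key is untouched
```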
default_headers={"X-Foo": "bar"}, ) @@ -156,6 +166,7 @@ def test_copy_default_query(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -286,6 +297,7 @@ def test_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -301,6 +313,7 @@ def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -315,6 +328,7 @@ def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -329,6 +343,7 @@ def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -344,6 +359,7 @@ async def test_invalid_http_client(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -353,6 +369,7 @@ def test_default_headers_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -364,6 +381,7 @@ def test_default_headers_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -376,14 +394,22 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = GradientAI( - base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=None, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) with pytest.raises( @@ -402,6 +428,7 @@ def test_default_query_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -607,6 +634,7 @@ def test_base_url_setter(self) -> None: base_url="https://example.com/from_init", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -617,7 +645,9 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): - client = GradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True) + client = GradientAI( + api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + ) assert 
client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( @@ -627,12 +657,14 @@ def test_base_url_env(self) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -656,12 +688,14 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -685,12 +719,14 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -709,7 +745,11 @@ def test_absolute_request_url(self, client: GradientAI) -> None: def test_copied_client_does_not_close_http(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) assert not client.is_closed() @@ -722,7 +762,11 @@ def test_copied_client_does_not_close_http(self) -> None: def test_client_context_manager(self) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) with client as c2: assert c2 is client @@ -748,6 +792,7 @@ def test_client_max_retries_validation(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -771,14 +816,22 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) strict_client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=False, ) response = client.get("/foo", cast_to=Model) @@ -808,7 +861,11 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: client = GradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + 
base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) headers = httpx.Headers({"retry-after": retry_after}) @@ -1006,7 +1063,11 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: class TestAsyncGradientAI: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) @pytest.mark.respx(base_url=base_url) @@ -1043,6 +1104,10 @@ def test_copy(self) -> None: assert copied.inference_key == "another My Inference Key" assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(agent_key="another My Agent Key") + assert copied.agent_key == "another My Agent Key" + assert self.client.agent_key == "My Agent Key" + def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -1064,6 +1129,7 @@ def test_copy_default_headers(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1102,6 +1168,7 @@ def test_copy_default_query(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -1232,6 +1299,7 @@ async def test_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -1247,6 +1315,7 @@ async def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -1261,6 +1330,7 @@ async def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -1275,6 +1345,7 @@ async def test_http_client_timeout_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=http_client, ) @@ -1290,6 +1361,7 @@ def test_invalid_http_client(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -1299,6 +1371,7 @@ def test_default_headers_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1310,6 +1383,7 @@ def test_default_headers_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -1322,14 +1396,22 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", 
url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = AsyncGradientAI( - base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=None, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) with pytest.raises( @@ -1348,6 +1430,7 @@ def test_default_query_option(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -1553,6 +1636,7 @@ def test_base_url_setter(self) -> None: base_url="https://example.com/from_init", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -1563,7 +1647,9 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): - client = AsyncGradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True) + client = AsyncGradientAI( + api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + ) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( @@ -1573,12 +1659,14 @@ def test_base_url_env(self) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1602,12 +1690,14 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1631,12 +1721,14 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None: base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, ), AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1655,7 +1747,11 @@ def test_absolute_request_url(self, client: AsyncGradientAI) -> None: async def test_copied_client_does_not_close_http(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) assert not client.is_closed() @@ -1669,7 +1765,11 @@ async def test_copied_client_does_not_close_http(self) -> None: async def test_client_context_manager(self) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + 
_strict_response_validation=True, ) async with client as c2: assert c2 is client @@ -1696,6 +1796,7 @@ async def test_client_max_retries_validation(self) -> None: base_url=base_url, api_key=api_key, inference_key=inference_key, + agent_key=agent_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -1721,14 +1822,22 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) strict_client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=False, ) response = await client.get("/foo", cast_to=Model) @@ -1759,7 +1868,11 @@ class Model(BaseModel): @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: client = AsyncGradientAI( - base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True + base_url=base_url, + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) headers = httpx.Headers({"retry-after": retry_after}) From 7e0cea9a4088dab308462c87f29d572b69019fc5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 13:53:57 +0000 Subject: [PATCH 104/200] feat(api): share chat completion chunk model between chat and agent.chat --- .stats.yml | 2 +- api.md | 6 +- .../resources/agents/chat/completions.py | 18 ++-- src/gradientai/resources/chat/completions.py | 2 +- src/gradientai/types/__init__.py | 7 +- src/gradientai/types/agents/chat/__init__.py | 1 - .../chat/agent_chat_completion_chunk.py | 93 ------------------- src/gradientai/types/chat/__init__.py | 1 - src/gradientai/types/shared/__init__.py | 1 + .../{chat => shared}/chat_completion_chunk.py | 2 +- 10 files changed, 22 insertions(+), 111 deletions(-) delete mode 100644 src/gradientai/types/agents/chat/agent_chat_completion_chunk.py rename src/gradientai/types/{chat => shared}/chat_completion_chunk.py (97%) diff --git a/.stats.yml b/.stats.yml index 9ebb83f9..bfeef284 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 9c2e548d86a376bc5f6c458de6944504 +config_hash: 6ed9ee8d3f0d6392816bfaf9dc4894a6 diff --git a/api.md b/api.md index 65699eaa..af2e8a33 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,7 @@ # Shared Types ```python -from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob +from gradientai.types import APILinks, APIMeta, ChatCompletionChunk, ChatCompletionTokenLogprob ``` # Agents @@ -65,7 +65,7 @@ Methods: Types: ```python -from gradientai.types.agents.chat import AgentChatCompletionChunk, CompletionCreateResponse +from gradientai.types.agents.chat import CompletionCreateResponse ``` Methods: @@ 
-260,7 +260,7 @@ Methods: Types: ```python -from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse +from gradientai.types.chat import CompletionCreateResponse ``` Methods: diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 7acba243..5923ef2a 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -20,8 +20,8 @@ from ...._streaming import Stream, AsyncStream from ...._base_client import make_request_options from ....types.agents.chat import completion_create_params +from ....types.shared.chat_completion_chunk import ChatCompletionChunk from ....types.agents.chat.completion_create_response import CompletionCreateResponse -from ....types.agents.chat.agent_chat_completion_chunk import AgentChatCompletionChunk __all__ = ["CompletionsResource", "AsyncCompletionsResource"] @@ -186,7 +186,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Stream[AgentChatCompletionChunk]: + ) -> Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -299,7 +299,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]: + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -412,7 +412,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]: + ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( "/chat/completions" if self._client._base_url_overridden @@ -446,7 +446,7 @@ def create( ), cast_to=CompletionCreateResponse, stream=stream or False, - stream_cls=Stream[AgentChatCompletionChunk], + stream_cls=Stream[ChatCompletionChunk], ) @@ -610,7 +610,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncStream[AgentChatCompletionChunk]: + ) -> AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -723,7 +723,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]: + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
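To make the effect of sharing the chunk model concrete: after this patch both streaming surfaces yield the same `ChatCompletionChunk`, re-exported from `gradientai.types`, so downstream code no longer needs a separate branch for `AgentChatCompletionChunk`. A consumption sketch; the model name is a placeholder:

```python
from gradientai import GradientAI
from gradientai.types import ChatCompletionChunk  # shared by chat and agents.chat

client = GradientAI()

stream = client.agents.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="example-model",  # placeholder model name
    stream=True,
)
for chunk in stream:
    # Every item is the shared model, so one handler covers both endpoints.
    assert isinstance(chunk, ChatCompletionChunk)
    # choices can be empty on the final chunk when include_usage is requested.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```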
@@ -836,7 +836,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]: + ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions" if self._client._base_url_overridden @@ -870,7 +870,7 @@ async def create( ), cast_to=CompletionCreateResponse, stream=stream or False, - stream_cls=AsyncStream[AgentChatCompletionChunk], + stream_cls=AsyncStream[ChatCompletionChunk], ) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index 79f37901..6847cfc5 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -20,7 +20,7 @@ from ..._streaming import Stream, AsyncStream from ...types.chat import completion_create_params from ..._base_client import make_request_options -from ...types.chat.chat_completion_chunk import ChatCompletionChunk +from ...types.shared.chat_completion_chunk import ChatCompletionChunk from ...types.chat.completion_create_response import CompletionCreateResponse __all__ = ["CompletionsResource", "AsyncCompletionsResource"] diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 4ec63b92..c8144381 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,7 +2,12 @@ from __future__ import annotations -from .shared import APIMeta as APIMeta, APILinks as APILinks, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .shared import ( + APIMeta as APIMeta, + APILinks as APILinks, + ChatCompletionChunk as ChatCompletionChunk, + ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, +) from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py index 305ba0af..9384ac14 100644 --- a/src/gradientai/types/agents/chat/__init__.py +++ b/src/gradientai/types/agents/chat/__init__.py @@ -4,4 +4,3 @@ from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse -from .agent_chat_completion_chunk import AgentChatCompletionChunk as AgentChatCompletionChunk diff --git a/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py b/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py deleted file mode 100644 index 36ee3d9e..00000000 --- a/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py +++ /dev/null @@ -1,93 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from typing_extensions import Literal - -from ...._models import BaseModel -from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob - -__all__ = ["AgentChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] - - -class ChoiceDelta(BaseModel): - content: Optional[str] = None - """The contents of the chunk message.""" - - refusal: Optional[str] = None - """The refusal message generated by the model.""" - - role: Optional[Literal["developer", "user", "assistant"]] = None - """The role of the author of this message.""" - - -class ChoiceLogprobs(BaseModel): - content: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message content tokens with log probability information.""" - - refusal: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message refusal tokens with log probability information.""" - - -class Choice(BaseModel): - delta: ChoiceDelta - """A chat completion delta generated by streamed model responses.""" - - finish_reason: Optional[Literal["stop", "length"]] = None - """The reason the model stopped generating tokens. - - This will be `stop` if the model hit a natural stop point or a provided stop - sequence, or `length` if the maximum number of tokens specified in the request - was reached - """ - - index: int - """The index of the choice in the list of choices.""" - - logprobs: Optional[ChoiceLogprobs] = None - """Log probability information for the choice.""" - - -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - -class AgentChatCompletionChunk(BaseModel): - id: str - """A unique identifier for the chat completion. Each chunk has the same ID.""" - - choices: List[Choice] - """A list of chat completion choices. - - Can contain more than one elements if `n` is greater than 1. Can also be empty - for the last chunk if you set `stream_options: {"include_usage": true}`. - """ - - created: int - """The Unix timestamp (in seconds) of when the chat completion was created. - - Each chunk has the same timestamp. - """ - - model: str - """The model to generate the completion.""" - - object: Literal["chat.completion.chunk"] - """The object type, which is always `chat.completion.chunk`.""" - - usage: Optional[Usage] = None - """ - An optional field that will only be present when you set - `stream_options: {"include_usage": true}` in your request. When present, it - contains a null value **except for the last chunk** which contains the token - usage statistics for the entire request. - - **NOTE:** If the stream is interrupted or cancelled, you may not receive the - final usage chunk which contains the total token usage for the request. 
- """ diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py index f0243162..9384ac14 100644 --- a/src/gradientai/types/chat/__init__.py +++ b/src/gradientai/types/chat/__init__.py @@ -2,6 +2,5 @@ from __future__ import annotations -from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py index dc71bdd3..9fdd7605 100644 --- a/src/gradientai/types/shared/__init__.py +++ b/src/gradientai/types/shared/__init__.py @@ -2,4 +2,5 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks +from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/chat/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py similarity index 97% rename from src/gradientai/types/chat/chat_completion_chunk.py rename to src/gradientai/types/shared/chat_completion_chunk.py index 4adcc63d..4d45ef8d 100644 --- a/src/gradientai/types/chat/chat_completion_chunk.py +++ b/src/gradientai/types/shared/chat_completion_chunk.py @@ -4,7 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel -from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob +from .chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] From 618e4b6921af4b263af7d856e4f7412c481dd848 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 13:57:58 +0000 Subject: [PATCH 105/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index bfeef284..55043d79 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 76 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 6ed9ee8d3f0d6392816bfaf9dc4894a6 +config_hash: 558ec54e9e056494abf623ff424c104e From 3e0a145452e74f3a3a516a4e2c0427a12f6b4901 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 14:03:42 +0000 Subject: [PATCH 106/200] feat(api): manual updates --- .stats.yml | 6 ++--- README.md | 24 +++++++++---------- api.md | 2 +- .../resources/agents/chat/completions.py | 8 +++---- tests/test_client.py | 20 ++++++++-------- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.stats.yml b/.stats.yml index 55043d79..1787fb3e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 76 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml +configured_endpoints: 77 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml openapi_spec_hash: 
c773d792724f5647ae25a5ae4ccec208 -config_hash: 558ec54e9e056494abf623ff424c104e +config_hash: 6edaff3557194ba8897d14f7ca74589c diff --git a/README.md b/README.md index f5fc7b7d..a11725da 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) -completion = client.agents.chat.completions.create( +completion = client.chat.completions.create( messages=[ { "role": "user", @@ -63,7 +63,7 @@ client = AsyncGradientAI( async def main() -> None: - completion = await client.agents.chat.completions.create( + completion = await client.chat.completions.create( messages=[ { "role": "user", @@ -105,7 +105,7 @@ async def main() -> None: api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: - completion = await client.agents.chat.completions.create( + completion = await client.chat.completions.create( messages=[ { "role": "user", @@ -129,7 +129,7 @@ from gradientai import GradientAI client = GradientAI() -stream = client.agents.chat.completions.create( +stream = client.chat.completions.create( messages=[ { "role": "user", @@ -150,7 +150,7 @@ from gradientai import AsyncGradientAI client = AsyncGradientAI() -stream = await client.agents.chat.completions.create( +stream = await client.chat.completions.create( messages=[ { "role": "user", @@ -182,7 +182,7 @@ from gradientai import GradientAI client = GradientAI() -completion = client.agents.chat.completions.create( +completion = client.chat.completions.create( messages=[ { "content": "string", @@ -211,7 +211,7 @@ from gradientai import GradientAI client = GradientAI() try: - client.agents.chat.completions.create( + client.chat.completions.create( messages=[ { "role": "user", @@ -262,7 +262,7 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).agents.chat.completions.create( +client.with_options(max_retries=5).chat.completions.create( messages=[ { "role": "user", @@ -293,7 +293,7 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).agents.chat.completions.create( +client.with_options(timeout=5.0).chat.completions.create( messages=[ { "role": "user", @@ -342,7 +342,7 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.agents.chat.completions.with_raw_response.create( +response = client.chat.completions.with_raw_response.create( messages=[{ "role": "user", "content": "What is the capital of France?", @@ -351,7 +351,7 @@ response = client.agents.chat.completions.with_raw_response.create( ) print(response.headers.get('X-My-Header')) -completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned +completion = response.parse() # get the object that `chat.completions.create()` would have returned print(completion.choices) ``` @@ -366,7 +366,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
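The sync form of this pattern appears in the diff context just below; given that these accessors become async methods on the async client, the async counterpart would look roughly like the following (the model name is a placeholder):

```python
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()
    async with client.chat.completions.with_streaming_response.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        model="example-model",  # placeholder model name
    ) as response:
        # Nothing is read from the wire until an accessor is awaited.
        print(await response.text())


asyncio.run(main())
```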
```python -with client.agents.chat.completions.with_streaming_response.create( +with client.chat.completions.with_streaming_response.create( messages=[ { "role": "user", diff --git a/api.md b/api.md index af2e8a33..c6acd4ec 100644 --- a/api.md +++ b/api.md @@ -70,7 +70,7 @@ from gradientai.types.agents.chat import CompletionCreateResponse Methods: -- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse ## EvaluationMetrics diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 5923ef2a..4ec70d30 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -414,9 +414,9 @@ def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( - "/chat/completions" + "/chat/completions?agent=true" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + else "https://inference.do-ai.run/v1/chat/completions?agent=true", body=maybe_transform( { "messages": messages, @@ -838,9 +838,9 @@ async def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( - "/chat/completions" + "/chat/completions?agent=true" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + else "https://inference.do-ai.run/v1/chat/completions?agent=true", body=await async_maybe_transform( { "messages": messages, diff --git a/tests/test_client.py b/tests/test_client.py index c901e2c8..5c16eb22 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -879,7 +879,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.agents.chat.completions.with_streaming_response.create( + client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -897,7 +897,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.agents.chat.completions.with_streaming_response.create( + client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -934,7 +934,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.chat.completions.with_raw_response.create( + response = client.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -966,7 +966,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.chat.completions.with_raw_response.create( + response = client.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -998,7 +998,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = client.agents.chat.completions.with_raw_response.create( + response = client.chat.completions.with_raw_response.create( messages=[ { 
"content": "string", @@ -1888,7 +1888,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.agents.chat.completions.with_streaming_response.create( + await async_client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -1908,7 +1908,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.agents.chat.completions.with_streaming_response.create( + await async_client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", @@ -1946,7 +1946,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.agents.chat.completions.with_raw_response.create( + response = await client.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -1979,7 +1979,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.agents.chat.completions.with_raw_response.create( + response = await client.chat.completions.with_raw_response.create( messages=[ { "content": "string", @@ -2012,7 +2012,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.post("/chat/completions").mock(side_effect=retry_handler) - response = await client.agents.chat.completions.with_raw_response.create( + response = await client.chat.completions.with_raw_response.create( messages=[ { "content": "string", From 4537401ce35bf01f3e99ca2725e871333f53edea Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Jul 2025 02:19:36 +0000 Subject: [PATCH 107/200] chore(internal): codegen related update --- requirements-dev.lock | 2 +- requirements.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 85b6a829..7866f549 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -56,7 +56,7 @@ httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx-aiohttp # via respx -httpx-aiohttp==0.1.6 +httpx-aiohttp==0.1.8 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio diff --git a/requirements.lock b/requirements.lock index 47944bd5..2a8aeea9 100644 --- a/requirements.lock +++ b/requirements.lock @@ -43,7 +43,7 @@ httpcore==1.0.2 httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx-aiohttp -httpx-aiohttp==0.1.6 +httpx-aiohttp==0.1.8 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio From a949d257f7123ca5c38293ab36a2b9b25fa5ec3f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 02:36:34 +0000 Subject: [PATCH 108/200] chore(internal): bump pinned h11 dep --- requirements-dev.lock | 4 ++-- requirements.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 7866f549..94875b2e 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -48,9 +48,9 @@ filelock==3.12.4 frozenlist==1.6.2 # via aiohttp # via aiosignal -h11==0.14.0 +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via 
httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python diff --git a/requirements.lock b/requirements.lock index 2a8aeea9..b16bfc5e 100644 --- a/requirements.lock +++ b/requirements.lock @@ -36,9 +36,9 @@ exceptiongroup==1.2.2 frozenlist==1.6.2 # via aiohttp # via aiosignal -h11==0.14.0 +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python From 272430c0a183ef6bc29a6662d38a9c2155f50fc9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 02:56:16 +0000 Subject: [PATCH 109/200] chore(package): mark python 3.13 as supported --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2a34f8a2..fcef7619 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: MacOS", From d8dcd78c3dd6085fbcde53814f82a2e4a96e1077 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 10 Jul 2025 03:04:54 +0000 Subject: [PATCH 110/200] fix(parsing): correctly handle nested discriminated unions --- src/gradientai/_models.py | 13 ++++++----- tests/test_models.py | 45 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/src/gradientai/_models.py b/src/gradientai/_models.py index 4f214980..528d5680 100644 --- a/src/gradientai/_models.py +++ b/src/gradientai/_models.py @@ -2,9 +2,10 @@ import os import inspect -from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast +from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( + List, Unpack, Literal, ClassVar, @@ -366,7 +367,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") - return construct_type(value=value, type_=type_) + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) def is_basemodel(type_: type) -> bool: @@ -420,7 +421,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: return cast(_T, construct_type(value=value, type_=type_)) -def construct_type(*, value: object, type_: object) -> object: +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: """Loose coercion to the expected type with construction of nested values. If the given value does not match the expected type then it is returned as-is. @@ -438,8 +439,10 @@ def construct_type(*, value: object, type_: object) -> object: type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] + if metadata is not None: + meta: tuple[Any, ...] 
= tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] type_ = extract_type_arg(type_, 0) else: meta = tuple() diff --git a/tests/test_models.py b/tests/test_models.py index 28aff1f3..3a857584 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -889,3 +889,48 @@ class ModelB(BaseModel): ) assert isinstance(m, ModelB) + + +def test_nested_discriminated_union() -> None: + class InnerType1(BaseModel): + type: Literal["type_1"] + + class InnerModel(BaseModel): + inner_value: str + + class InnerType2(BaseModel): + type: Literal["type_2"] + some_inner_model: InnerModel + + class Type1(BaseModel): + base_type: Literal["base_type_1"] + value: Annotated[ + Union[ + InnerType1, + InnerType2, + ], + PropertyInfo(discriminator="type"), + ] + + class Type2(BaseModel): + base_type: Literal["base_type_2"] + + T = Annotated[ + Union[ + Type1, + Type2, + ], + PropertyInfo(discriminator="base_type"), + ] + + model = construct_type( + type_=T, + value={ + "base_type": "base_type_1", + "value": { + "type": "type_2", + }, + }, + ) + assert isinstance(model, Type1) + assert isinstance(model.value, InnerType2) From c97aae9fa5fcc49c940b8ca2391fda2537075f15 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 10 Jul 2025 22:02:46 +0000 Subject: [PATCH 111/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1787fb3e..89f80bc1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 77 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 6edaff3557194ba8897d14f7ca74589c +config_hash: 0bd094d86a010f7cbd5eb22ef548a29f From e087fe18d615e10347d3e663b624cf3afec09394 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 03:10:30 +0000 Subject: [PATCH 112/200] chore(readme): fix version rendering on pypi --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a11725da..292c3668 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Gradient AI Python API library -[![PyPI version]()](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) + +[![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg?label=pypi%20(stable))](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. 
The library includes type definitions for all request params and response fields, From 48c01786fa272a0cbd49003025849a9009e6d849 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 12 Jul 2025 02:16:35 +0000 Subject: [PATCH 113/200] fix(client): don't send Content-Type header on GET requests --- pyproject.toml | 2 +- src/gradientai/_base_client.py | 11 +++++++++-- tests/test_client.py | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fcef7619..89695a11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ Homepage = "https://github.com/digitalocean/gradientai-python" Repository = "https://github.com/digitalocean/gradientai-python" [project.optional-dependencies] -aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] [tool.rye] managed = true diff --git a/src/gradientai/_base_client.py b/src/gradientai/_base_client.py index 6dce600b..379c27d1 100644 --- a/src/gradientai/_base_client.py +++ b/src/gradientai/_base_client.py @@ -529,6 +529,15 @@ def _build_request( # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -540,8 +549,6 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. # https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data if is_given(json_data) else None, - files=files, **kwargs, ) diff --git a/tests/test_client.py b/tests/test_client.py index 5c16eb22..61013a0a 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -546,7 +546,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, @@ -1548,7 +1548,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, async_client: AsyncGradientAI) -> None: request = async_client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, From 58bf2641e1c374165a33701f45a7775a48ecd1e4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 02:16:15 +0000 Subject: [PATCH 114/200] feat: clean up environment call outs --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 292c3668..67a567cc 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,6 @@ pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python[aiohttp] Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python -import os import asyncio from gradientai import 
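Stepping back to the `Content-Type` fix in PATCH 113 above: GET requests built by the client now carry no body and no `Content-Type` header, which lines up with httpx's own defaults. A standalone illustration that runs offline; the base URL and path are placeholders:

```python
import httpx

client = httpx.Client(base_url="http://localhost:4010")

get_request = client.build_request("GET", "/foo")
# No body is attached to a GET, so no Content-Type header is set.
assert "Content-Type" not in get_request.headers

post_request = client.build_request("POST", "/foo", json={"hello": "world"})
# A JSON body still produces the expected header on write methods.
assert post_request.headers["Content-Type"] == "application/json"
```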
DefaultAioHttpClient from gradientai import AsyncGradientAI @@ -103,7 +102,7 @@ from gradientai import AsyncGradientAI async def main() -> None: async with AsyncGradientAI( - api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted + api_key="My API Key", http_client=DefaultAioHttpClient(), ) as client: completion = await client.chat.completions.create( From 995928b2351f87f208d9b6bdc8a2b8cbcbb0f06c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 06:16:25 +0000 Subject: [PATCH 115/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index fd0ccba9..000572ec 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.12" + ".": "0.1.0-alpha.13" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 89695a11..3077fd45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.12" +version = "0.1.0-alpha.13" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index 0084d0f3..e6299f17 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradientai" -__version__ = "0.1.0-alpha.12" # x-release-please-version +__version__ = "0.1.0-alpha.13" # x-release-please-version From 762325bd81cd21315fb0949bb1e8224da4628b34 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 116/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 000572ec..b0699969 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.13" + ".": "0.1.0-alpha.14" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3077fd45..0b307b64 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.13" +version = "0.1.0-alpha.14" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index e6299f17..c1d566e6 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradientai" -__version__ = "0.1.0-alpha.13" # x-release-please-version +__version__ = "0.1.0-alpha.14" # x-release-please-version From 309b3b62e2098dfa5e20d50dd432fcace40a4d63 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 22:25:45 +0000 Subject: [PATCH 117/200] feat(api): add gpu droplets --- .stats.yml | 8 +- api.md | 454 +++- src/gradientai/_client.py | 39 +- src/gradientai/resources/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 118 +- src/gradientai/resources/agents/api_keys.py | 28 +- .../resources/agents/chat/completions.py | 98 + .../agents/evaluation_metrics/__init__.py | 14 + .../evaluation_metrics/evaluation_metrics.py | 143 ++ .../agents/evaluation_metrics/models.py | 254 ++ .../evaluation_metrics/workspaces/agents.py | 24 +- .../workspaces/workspaces.py | 20 + .../resources/agents/evaluation_runs.py | 42 +- .../resources/agents/evaluation_test_cases.py | 12 +- src/gradientai/resources/agents/functions.py | 60 + src/gradientai/resources/agents/routes.py | 24 + src/gradientai/resources/agents/versions.py | 16 +- src/gradientai/resources/chat/completions.py | 98 + .../resources/gpu_droplets/__init__.py | 187 ++ .../gpu_droplets/account/__init__.py | 33 + .../resources/gpu_droplets/account/account.py | 102 + .../resources/gpu_droplets/account/keys.py | 588 +++++ .../resources/gpu_droplets/actions.py | 2048 +++++++++++++++ .../resources/gpu_droplets/autoscale.py | 967 ++++++++ .../resources/gpu_droplets/backups.py | 460 ++++ .../destroy_with_associated_resources.py | 624 +++++ .../gpu_droplets/firewalls/__init__.py | 61 + .../gpu_droplets/firewalls/droplets.py | 296 +++ .../gpu_droplets/firewalls/firewalls.py | 647 +++++ .../resources/gpu_droplets/firewalls/rules.py | 320 +++ .../resources/gpu_droplets/firewalls/tags.py | 308 +++ .../gpu_droplets/floating_ips/__init__.py | 33 + .../gpu_droplets/floating_ips/actions.py | 489 ++++ .../gpu_droplets/floating_ips/floating_ips.py | 635 +++++ .../resources/gpu_droplets/gpu_droplets.py | 2008 +++++++++++++++ .../resources/gpu_droplets/images/__init__.py | 33 + .../resources/gpu_droplets/images/actions.py | 560 +++++ .../resources/gpu_droplets/images/images.py | 867 +++++++ .../gpu_droplets/load_balancers/__init__.py | 47 + .../gpu_droplets/load_balancers/droplets.py | 302 +++ .../load_balancers/forwarding_rules.py | 301 +++ .../load_balancers/load_balancers.py | 2205 +++++++++++++++++ .../resources/gpu_droplets/sizes.py | 199 ++ .../resources/gpu_droplets/snapshots.py | 425 ++++ .../gpu_droplets/volumes/__init__.py | 47 + .../resources/gpu_droplets/volumes/actions.py | 1554 ++++++++++++ .../gpu_droplets/volumes/snapshots.py | 499 ++++ .../resources/gpu_droplets/volumes/volumes.py | 1144 +++++++++ .../resources/inference/api_keys.py | 20 +- .../resources/knowledge_bases/data_sources.py | 24 +- .../knowledge_bases/indexing_jobs.py | 18 +- .../knowledge_bases/knowledge_bases.py | 28 +- src/gradientai/resources/models/models.py | 182 +- .../resources/models/providers/anthropic.py | 36 +- .../resources/models/providers/openai.py | 36 +- src/gradientai/resources/regions.py | 42 +- src/gradientai/types/__init__.py | 40 +- src/gradientai/types/agent_create_params.py | 8 + src/gradientai/types/agent_create_response.py | 1 + src/gradientai/types/agent_delete_response.py | 1 + src/gradientai/types/agent_list_params.py | 6 +- src/gradientai/types/agent_list_response.py | 75 + .../types/agent_retrieve_response.py | 1 + 
src/gradientai/types/agent_update_params.py | 18 + src/gradientai/types/agent_update_response.py | 1 + .../types/agent_update_status_params.py | 11 + .../types/agent_update_status_response.py | 1 + src/gradientai/types/agents/__init__.py | 7 + .../types/agents/api_evaluation_metric.py | 16 +- .../agents/api_evaluation_metric_result.py | 17 + .../types/agents/api_evaluation_prompt.py | 7 + .../types/agents/api_evaluation_run.py | 18 + .../types/agents/api_evaluation_test_case.py | 24 +- .../types/agents/api_key_create_params.py | 2 + .../types/agents/api_key_create_response.py | 1 + .../types/agents/api_key_delete_response.py | 1 + .../types/agents/api_key_list_params.py | 4 +- .../types/agents/api_key_list_response.py | 3 + .../agents/api_key_regenerate_response.py | 1 + .../types/agents/api_key_update_params.py | 3 + .../types/agents/api_key_update_response.py | 1 + .../agents/api_link_knowledge_base_output.py | 1 + .../types/agents/api_star_metric.py | 6 + .../types/agents/api_star_metric_param.py | 6 + .../agents/chat/completion_create_params.py | 113 + .../agents/chat/completion_create_response.py | 54 +- ...reate_file_upload_presigned_urls_params.py | 1 + .../evaluation_metric_list_regions_params.py | 15 + ...evaluation_metric_list_regions_response.py | 29 + .../agents/evaluation_metrics/__init__.py | 2 + .../evaluation_metrics}/model_list_params.py | 8 +- .../evaluation_metrics/model_list_response.py | 21 + .../workspace_create_params.py | 3 + .../workspace_delete_response.py | 1 + .../workspace_list_response.py | 1 + .../workspace_update_params.py | 2 + .../workspaces/agent_list_params.py | 14 +- .../workspaces/agent_list_response.py | 2 + .../workspaces/agent_move_params.py | 2 + .../agents/evaluation_run_create_params.py | 1 + .../evaluation_run_list_results_params.py | 15 + .../evaluation_run_list_results_response.py | 8 + .../evaluation_test_case_list_response.py | 4 + .../evaluation_test_case_update_params.py | 1 + .../types/agents/function_create_params.py | 7 + .../types/agents/function_create_response.py | 1 + .../types/agents/function_delete_response.py | 1 + .../types/agents/function_update_params.py | 8 + .../types/agents/function_update_response.py | 1 + .../agents/knowledge_base_detach_response.py | 1 + .../types/agents/route_add_params.py | 2 + .../types/agents/route_add_response.py | 1 + .../types/agents/route_delete_response.py | 2 + .../types/agents/route_update_params.py | 4 + .../types/agents/route_update_response.py | 2 + .../types/agents/route_view_response.py | 1 + .../types/agents/version_list_params.py | 4 +- .../types/agents/version_list_response.py | 51 +- .../types/agents/version_update_params.py | 2 + .../types/agents/version_update_response.py | 1 + src/gradientai/types/api_agent.py | 104 + .../types/api_agent_api_key_info.py | 5 + src/gradientai/types/api_agent_model.py | 14 + .../types/api_anthropic_api_key_info.py | 6 + src/gradientai/types/api_knowledge_base.py | 10 + src/gradientai/types/api_model.py | 10 + src/gradientai/types/api_model_version.py | 3 + .../types/api_openai_api_key_info.py | 7 + src/gradientai/types/api_workspace.py | 10 + .../types/chat/completion_create_params.py | 113 + .../types/chat/completion_create_response.py | 54 +- src/gradientai/types/droplet_backup_policy.py | 28 + .../types/droplet_backup_policy_param.py | 21 + .../types/gpu_droplet_create_params.py | 213 ++ .../types/gpu_droplet_create_response.py | 39 + .../types/gpu_droplet_delete_by_tag_params.py | 12 + .../gpu_droplet_list_firewalls_params.py | 15 + 
.../gpu_droplet_list_firewalls_response.py | 19 + .../types/gpu_droplet_list_kernels_params.py | 15 + .../gpu_droplet_list_kernels_response.py | 19 + .../gpu_droplet_list_neighbors_response.py | 12 + .../types/gpu_droplet_list_params.py | 34 + .../types/gpu_droplet_list_response.py | 19 + .../gpu_droplet_list_snapshots_params.py | 15 + .../gpu_droplet_list_snapshots_response.py | 53 + .../types/gpu_droplet_retrieve_response.py | 12 + src/gradientai/types/gpu_droplets/__init__.py | 104 + .../types/gpu_droplets/account/__init__.py | 11 + .../gpu_droplets/account/key_create_params.py | 22 + .../account/key_create_response.py | 39 + .../gpu_droplets/account/key_list_params.py | 15 + .../gpu_droplets/account/key_list_response.py | 46 + .../account/key_retrieve_response.py | 39 + .../gpu_droplets/account/key_update_params.py | 15 + .../account/key_update_response.py | 39 + .../action_bulk_initiate_params.py | 72 + .../action_bulk_initiate_response.py | 12 + .../gpu_droplets/action_initiate_params.py | 278 +++ .../gpu_droplets/action_initiate_response.py | 12 + .../types/gpu_droplets/action_list_params.py | 15 + .../gpu_droplets/action_list_response.py | 19 + .../gpu_droplets/action_retrieve_response.py | 12 + .../types/gpu_droplets/associated_resource.py | 21 + .../gpu_droplets/autoscale_create_params.py | 28 + .../gpu_droplets/autoscale_create_response.py | 12 + .../autoscale_list_history_params.py | 15 + .../autoscale_list_history_response.py | 48 + .../autoscale_list_members_params.py | 15 + .../autoscale_list_members_response.py | 47 + .../gpu_droplets/autoscale_list_params.py | 18 + .../gpu_droplets/autoscale_list_response.py | 19 + .../types/gpu_droplets/autoscale_pool.py | 54 + .../autoscale_pool_droplet_template.py | 69 + .../autoscale_pool_droplet_template_param.py | 84 + .../autoscale_pool_dynamic_config.py | 27 + .../autoscale_pool_dynamic_config_param.py | 27 + .../autoscale_pool_static_config.py | 10 + .../autoscale_pool_static_config_param.py | 12 + .../autoscale_retrieve_response.py | 12 + .../gpu_droplets/autoscale_update_params.py | 28 + .../gpu_droplets/autoscale_update_response.py | 12 + .../types/gpu_droplets/backup_list_params.py | 15 + .../backup_list_policies_params.py | 15 + .../backup_list_policies_response.py | 41 + .../gpu_droplets/backup_list_response.py | 53 + ...backup_list_supported_policies_response.py | 28 + .../backup_retrieve_policy_response.py | 30 + .../types/gpu_droplets/current_utilization.py | 15 + ...sociated_resource_check_status_response.py | 41 + ...ciated_resource_delete_selective_params.py | 34 + ..._with_associated_resource_list_response.py | 37 + .../destroyed_associated_resource.py | 28 + src/gradientai/types/gpu_droplets/domains.py | 22 + .../types/gpu_droplets/domains_param.py | 22 + src/gradientai/types/gpu_droplets/firewall.py | 98 + .../gpu_droplets/firewall_create_params.py | 17 + .../gpu_droplets/firewall_create_response.py | 12 + .../gpu_droplets/firewall_list_params.py | 15 + .../gpu_droplets/firewall_list_response.py | 19 + .../types/gpu_droplets/firewall_param.py | 67 + .../firewall_retrieve_response.py | 12 + .../gpu_droplets/firewall_update_params.py | 13 + .../gpu_droplets/firewall_update_response.py | 12 + .../types/gpu_droplets/firewalls/__init__.py | 10 + .../firewalls/droplet_add_params.py | 13 + .../firewalls/droplet_remove_params.py | 13 + .../gpu_droplets/firewalls/rule_add_params.py | 46 + .../firewalls/rule_remove_params.py | 46 + .../gpu_droplets/firewalls/tag_add_params.py | 18 + .../firewalls/tag_remove_params.py | 18 + 
.../types/gpu_droplets/floating_ip.py | 47 + .../gpu_droplets/floating_ip_create_params.py | 24 + .../floating_ip_create_response.py | 21 + .../gpu_droplets/floating_ip_list_params.py | 15 + .../gpu_droplets/floating_ip_list_response.py | 19 + .../floating_ip_retrieve_response.py | 12 + .../gpu_droplets/floating_ips/__init__.py | 8 + .../floating_ips/action_create_params.py | 24 + .../floating_ips/action_create_response.py | 17 + .../floating_ips/action_list_response.py | 19 + .../floating_ips/action_retrieve_response.py | 17 + .../types/gpu_droplets/forwarding_rule.py | 49 + .../gpu_droplets/forwarding_rule_param.py | 48 + .../types/gpu_droplets/glb_settings.py | 45 + .../types/gpu_droplets/glb_settings_param.py | 45 + .../types/gpu_droplets/health_check.py | 49 + .../types/gpu_droplets/health_check_param.py | 48 + .../types/gpu_droplets/image_create_params.py | 81 + .../gpu_droplets/image_create_response.py | 12 + .../types/gpu_droplets/image_list_params.py | 27 + .../types/gpu_droplets/image_list_response.py | 19 + .../gpu_droplets/image_retrieve_response.py | 10 + .../types/gpu_droplets/image_update_params.py | 42 + .../gpu_droplets/image_update_response.py | 10 + .../types/gpu_droplets/images/__init__.py | 6 + .../images/action_create_params.py | 45 + .../images/action_list_response.py | 19 + .../types/gpu_droplets/lb_firewall.py | 21 + .../types/gpu_droplets/lb_firewall_param.py | 22 + .../types/gpu_droplets/load_balancer.py | 185 ++ .../load_balancer_create_params.py | 335 +++ .../load_balancer_create_response.py | 12 + .../gpu_droplets/load_balancer_list_params.py | 15 + .../load_balancer_list_response.py | 19 + .../load_balancer_retrieve_response.py | 12 + .../load_balancer_update_params.py | 335 +++ .../load_balancer_update_response.py | 12 + .../gpu_droplets/load_balancers/__init__.py | 8 + .../load_balancers/droplet_add_params.py | 13 + .../load_balancers/droplet_remove_params.py | 13 + .../forwarding_rule_add_params.py | 14 + .../forwarding_rule_remove_params.py | 14 + .../types/gpu_droplets/size_list_params.py | 15 + .../types/gpu_droplets/size_list_response.py | 19 + .../gpu_droplets/snapshot_list_params.py | 18 + .../gpu_droplets/snapshot_list_response.py | 19 + .../snapshot_retrieve_response.py | 12 + .../types/gpu_droplets/sticky_sessions.py | 30 + .../gpu_droplets/sticky_sessions_param.py | 29 + .../gpu_droplets/volume_create_params.py | 153 ++ .../gpu_droplets/volume_create_response.py | 65 + .../volume_delete_by_name_params.py | 31 + .../types/gpu_droplets/volume_list_params.py | 37 + .../gpu_droplets/volume_list_response.py | 73 + .../gpu_droplets/volume_retrieve_response.py | 65 + .../types/gpu_droplets/volumes/__init__.py | 18 + .../volumes/action_initiate_by_id_params.py | 133 + .../volumes/action_initiate_by_id_response.py | 12 + .../volumes/action_initiate_by_name_params.py | 97 + .../action_initiate_by_name_response.py | 12 + .../volumes/action_list_params.py | 15 + .../volumes/action_list_response.py | 19 + .../volumes/action_retrieve_params.py | 17 + .../volumes/action_retrieve_response.py | 12 + .../volumes/snapshot_create_params.py | 21 + .../volumes/snapshot_create_response.py | 12 + .../volumes/snapshot_list_params.py | 15 + .../volumes/snapshot_list_response.py | 19 + .../volumes/snapshot_retrieve_response.py | 12 + .../gpu_droplets/volumes/volume_action.py | 18 + .../types/inference/api_key_create_params.py | 1 + .../inference/api_key_create_response.py | 1 + .../inference/api_key_delete_response.py | 1 + .../types/inference/api_key_list_params.py | 4 +- 
.../types/inference/api_key_list_response.py | 3 + .../types/inference/api_key_update_params.py | 2 + .../api_key_update_regenerate_response.py | 1 + .../inference/api_key_update_response.py | 1 + .../types/inference/api_model_api_key_info.py | 5 + .../types/knowledge_base_create_params.py | 6 + .../types/knowledge_base_create_response.py | 1 + .../types/knowledge_base_delete_response.py | 1 + .../types/knowledge_base_list_params.py | 4 +- .../types/knowledge_base_list_response.py | 3 + .../types/knowledge_base_retrieve_response.py | 1 + .../types/knowledge_base_update_params.py | 5 +- .../types/knowledge_base_update_response.py | 1 + .../api_file_upload_data_source.py | 3 + .../api_file_upload_data_source_param.py | 3 + .../api_indexed_data_source.py | 13 + .../types/knowledge_bases/api_indexing_job.py | 7 + .../api_knowledge_base_data_source.py | 12 + .../knowledge_bases/api_spaces_data_source.py | 2 + .../api_spaces_data_source_param.py | 2 + .../knowledge_bases/aws_data_source_param.py | 4 + .../data_source_create_params.py | 4 + .../data_source_create_response.py | 1 + .../data_source_delete_response.py | 2 + .../data_source_list_params.py | 4 +- .../data_source_list_response.py | 3 + .../indexing_job_create_params.py | 5 + .../indexing_job_create_response.py | 1 + .../indexing_job_list_params.py | 4 +- .../indexing_job_list_response.py | 3 + .../indexing_job_retrieve_response.py | 1 + .../indexing_job_update_cancel_response.py | 1 + src/gradientai/types/model_list_response.py | 28 +- .../types/model_retrieve_response.py | 21 + .../providers/anthropic_create_params.py | 2 + .../providers/anthropic_create_response.py | 1 + .../providers/anthropic_delete_response.py | 1 + .../providers/anthropic_list_agents_params.py | 4 +- .../anthropic_list_agents_response.py | 2 + .../models/providers/anthropic_list_params.py | 4 +- .../providers/anthropic_list_response.py | 3 + .../providers/anthropic_retrieve_response.py | 1 + .../providers/anthropic_update_params.py | 3 + .../providers/anthropic_update_response.py | 1 + .../models/providers/openai_create_params.py | 2 + .../providers/openai_create_response.py | 1 + .../providers/openai_delete_response.py | 1 + .../models/providers/openai_list_params.py | 4 +- .../models/providers/openai_list_response.py | 3 + .../openai_retrieve_agents_params.py | 4 +- .../openai_retrieve_agents_response.py | 2 + .../providers/openai_retrieve_response.py | 1 + .../models/providers/openai_update_params.py | 3 + .../providers/openai_update_response.py | 1 + src/gradientai/types/region_list_params.py | 8 +- src/gradientai/types/region_list_response.py | 22 +- src/gradientai/types/shared/__init__.py | 23 + src/gradientai/types/shared/action.py | 51 + src/gradientai/types/shared/action_link.py | 18 + src/gradientai/types/shared/api_links.py | 5 + src/gradientai/types/shared/api_meta.py | 3 + src/gradientai/types/shared/backward_links.py | 15 + .../types/shared/chat_completion_chunk.py | 55 +- .../types/shared/completion_usage.py | 16 + src/gradientai/types/shared/disk_info.py | 27 + src/gradientai/types/shared/droplet.py | 143 ++ .../shared/droplet_next_backup_window.py | 22 + .../types/shared/firewall_rule_target.py | 41 + src/gradientai/types/shared/forward_links.py | 15 + .../types/shared/garbage_collection.py | 43 + src/gradientai/types/shared/gpu_info.py | 25 + src/gradientai/types/shared/image.py | 131 + src/gradientai/types/shared/kernel.py | 25 + .../types/shared/meta_properties.py | 12 + src/gradientai/types/shared/network_v4.py | 26 + 
src/gradientai/types/shared/network_v6.py | 25 + src/gradientai/types/shared/page_links.py | 16 + src/gradientai/types/shared/region.py | 36 + src/gradientai/types/shared/size.py | 79 + src/gradientai/types/shared/snapshots.py | 47 + src/gradientai/types/shared/subscription.py | 19 + .../types/shared/subscription_tier_base.py | 44 + src/gradientai/types/shared/vpc_peering.py | 30 + .../types/shared_params/__init__.py | 3 + .../shared_params/firewall_rule_target.py | 42 + .../agents/chat/test_completions.py | 44 + .../agents/evaluation_metrics/test_models.py | 102 + .../evaluation_metrics/test_workspaces.py | 40 +- .../workspaces/test_agents.py | 42 +- tests/api_resources/agents/test_api_keys.py | 156 +- .../agents/test_evaluation_datasets.py | 20 +- .../agents/test_evaluation_metrics.py | 79 +- .../agents/test_evaluation_runs.py | 76 +- .../agents/test_evaluation_test_cases.py | 116 +- tests/api_resources/agents/test_functions.py | 132 +- .../agents/test_knowledge_bases.py | 64 +- tests/api_resources/agents/test_routes.py | 148 +- tests/api_resources/agents/test_versions.py | 40 +- tests/api_resources/chat/test_completions.py | 44 + tests/api_resources/gpu_droplets/__init__.py | 1 + .../gpu_droplets/account/__init__.py | 1 + .../gpu_droplets/account/test_keys.py | 399 +++ .../gpu_droplets/firewalls/__init__.py | 1 + .../gpu_droplets/firewalls/test_droplets.py | 206 ++ .../gpu_droplets/firewalls/test_rules.py | 326 +++ .../gpu_droplets/firewalls/test_tags.py | 206 ++ .../gpu_droplets/floating_ips/__init__.py | 1 + .../gpu_droplets/floating_ips/test_actions.py | 396 +++ .../gpu_droplets/images/__init__.py | 1 + .../gpu_droplets/images/test_actions.py | 321 +++ .../gpu_droplets/load_balancers/__init__.py | 1 + .../load_balancers/test_droplets.py | 206 ++ .../load_balancers/test_forwarding_rules.py | 318 +++ .../gpu_droplets/test_actions.py | 1209 +++++++++ .../gpu_droplets/test_autoscale.py | 953 +++++++ .../gpu_droplets/test_backups.py | 315 +++ .../test_destroy_with_associated_resources.py | 431 ++++ .../gpu_droplets/test_firewalls.py | 617 +++++ .../gpu_droplets/test_floating_ips.py | 424 ++++ .../api_resources/gpu_droplets/test_images.py | 417 ++++ .../gpu_droplets/test_load_balancers.py | 1443 +++++++++++ .../api_resources/gpu_droplets/test_sizes.py | 98 + .../gpu_droplets/test_snapshots.py | 236 ++ .../gpu_droplets/test_volumes.py | 568 +++++ .../gpu_droplets/volumes/__init__.py | 1 + .../gpu_droplets/volumes/test_actions.py | 825 ++++++ .../gpu_droplets/volumes/test_snapshots.py | 412 +++ .../api_resources/inference/test_api_keys.py | 28 +- .../knowledge_bases/test_data_sources.py | 104 +- .../knowledge_bases/test_indexing_jobs.py | 28 +- .../models/providers/test_anthropic.py | 52 +- .../models/providers/test_openai.py | 52 +- tests/api_resources/test_agents.py | 130 +- tests/api_resources/test_gpu_droplets.py | 912 +++++++ tests/api_resources/test_knowledge_bases.py | 128 +- tests/api_resources/test_models.py | 100 +- tests/api_resources/test_regions.py | 8 +- 421 files changed, 39542 insertions(+), 1006 deletions(-) create mode 100644 src/gradientai/resources/agents/evaluation_metrics/models.py create mode 100644 src/gradientai/resources/gpu_droplets/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/account/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/account/account.py create mode 100644 src/gradientai/resources/gpu_droplets/account/keys.py create mode 100644 src/gradientai/resources/gpu_droplets/actions.py create mode 100644 
src/gradientai/resources/gpu_droplets/autoscale.py create mode 100644 src/gradientai/resources/gpu_droplets/backups.py create mode 100644 src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/droplets.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/firewalls.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/rules.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/tags.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py create mode 100644 src/gradientai/resources/gpu_droplets/gpu_droplets.py create mode 100644 src/gradientai/resources/gpu_droplets/images/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/images/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/images/images.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/droplets.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py create mode 100644 src/gradientai/resources/gpu_droplets/sizes.py create mode 100644 src/gradientai/resources/gpu_droplets/snapshots.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/snapshots.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/volumes.py create mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_params.py create mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_response.py rename src/gradientai/types/{ => agents/evaluation_metrics}/model_list_params.py (87%) create mode 100644 src/gradientai/types/agents/evaluation_metrics/model_list_response.py create mode 100644 src/gradientai/types/agents/evaluation_run_list_results_params.py create mode 100644 src/gradientai/types/droplet_backup_policy.py create mode 100644 src/gradientai/types/droplet_backup_policy_param.py create mode 100644 src/gradientai/types/gpu_droplet_create_params.py create mode 100644 src/gradientai/types/gpu_droplet_create_response.py create mode 100644 src/gradientai/types/gpu_droplet_delete_by_tag_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_firewalls_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_firewalls_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_kernels_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_kernels_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_neighbors_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_snapshots_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_snapshots_response.py create mode 100644 src/gradientai/types/gpu_droplet_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/__init__.py 
create mode 100644 src/gradientai/types/gpu_droplets/account/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_initiate_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_initiate_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/associated_resource.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_history_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_history_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_members_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_members_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_policies_params.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_policies_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py create mode 100644 src/gradientai/types/gpu_droplets/current_utilization.py create mode 100644 src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py create mode 100644 
src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py create mode 100644 src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/destroyed_associated_resource.py create mode 100644 src/gradientai/types/gpu_droplets/domains.py create mode 100644 src/gradientai/types/gpu_droplets/domains_param.py create mode 100644 src/gradientai/types/gpu_droplets/firewall.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_param.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/forwarding_rule.py create mode 100644 src/gradientai/types/gpu_droplets/forwarding_rule_param.py create mode 100644 src/gradientai/types/gpu_droplets/glb_settings.py create mode 100644 src/gradientai/types/gpu_droplets/glb_settings_param.py create mode 100644 src/gradientai/types/gpu_droplets/health_check.py create mode 100644 src/gradientai/types/gpu_droplets/health_check_param.py create mode 100644 src/gradientai/types/gpu_droplets/image_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/image_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/image_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_update_params.py create mode 100644 
src/gradientai/types/gpu_droplets/image_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/images/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/images/action_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/images/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/lb_firewall.py create mode 100644 src/gradientai/types/gpu_droplets/lb_firewall_param.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/size_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/size_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/snapshot_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/snapshot_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/sticky_sessions.py create mode 100644 src/gradientai/types/gpu_droplets/sticky_sessions_param.py create mode 100644 src/gradientai/types/gpu_droplets/volume_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/volume_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py 
create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/volume_action.py create mode 100644 src/gradientai/types/model_retrieve_response.py create mode 100644 src/gradientai/types/shared/action.py create mode 100644 src/gradientai/types/shared/action_link.py create mode 100644 src/gradientai/types/shared/backward_links.py create mode 100644 src/gradientai/types/shared/completion_usage.py create mode 100644 src/gradientai/types/shared/disk_info.py create mode 100644 src/gradientai/types/shared/droplet.py create mode 100644 src/gradientai/types/shared/droplet_next_backup_window.py create mode 100644 src/gradientai/types/shared/firewall_rule_target.py create mode 100644 src/gradientai/types/shared/forward_links.py create mode 100644 src/gradientai/types/shared/garbage_collection.py create mode 100644 src/gradientai/types/shared/gpu_info.py create mode 100644 src/gradientai/types/shared/image.py create mode 100644 src/gradientai/types/shared/kernel.py create mode 100644 src/gradientai/types/shared/meta_properties.py create mode 100644 src/gradientai/types/shared/network_v4.py create mode 100644 src/gradientai/types/shared/network_v6.py create mode 100644 src/gradientai/types/shared/page_links.py create mode 100644 src/gradientai/types/shared/region.py create mode 100644 src/gradientai/types/shared/size.py create mode 100644 src/gradientai/types/shared/snapshots.py create mode 100644 src/gradientai/types/shared/subscription.py create mode 100644 src/gradientai/types/shared/subscription_tier_base.py create mode 100644 src/gradientai/types/shared/vpc_peering.py create mode 100644 src/gradientai/types/shared_params/__init__.py create mode 100644 src/gradientai/types/shared_params/firewall_rule_target.py create mode 100644 tests/api_resources/agents/evaluation_metrics/test_models.py create mode 100644 tests/api_resources/gpu_droplets/__init__.py create mode 100644 tests/api_resources/gpu_droplets/account/__init__.py create mode 100644 tests/api_resources/gpu_droplets/account/test_keys.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/__init__.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_droplets.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_rules.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_tags.py create mode 100644 tests/api_resources/gpu_droplets/floating_ips/__init__.py create mode 100644 tests/api_resources/gpu_droplets/floating_ips/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/images/__init__.py create mode 100644 tests/api_resources/gpu_droplets/images/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/__init__.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/test_droplets.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py create mode 100644 tests/api_resources/gpu_droplets/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/test_autoscale.py create mode 100644 tests/api_resources/gpu_droplets/test_backups.py create mode 100644 tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py create mode 100644 tests/api_resources/gpu_droplets/test_firewalls.py create mode 100644 
tests/api_resources/gpu_droplets/test_floating_ips.py create mode 100644 tests/api_resources/gpu_droplets/test_images.py create mode 100644 tests/api_resources/gpu_droplets/test_load_balancers.py create mode 100644 tests/api_resources/gpu_droplets/test_sizes.py create mode 100644 tests/api_resources/gpu_droplets/test_snapshots.py create mode 100644 tests/api_resources/gpu_droplets/test_volumes.py create mode 100644 tests/api_resources/gpu_droplets/volumes/__init__.py create mode 100644 tests/api_resources/gpu_droplets/volumes/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/volumes/test_snapshots.py create mode 100644 tests/api_resources/test_gpu_droplets.py diff --git a/.stats.yml b/.stats.yml index 89f80bc1..5f9d16dd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 77 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml -openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 0bd094d86a010f7cbd5eb22ef548a29f +configured_endpoints: 168 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml +openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d +config_hash: 683ea6ba4d63037c1c72484e5936e73c diff --git a/api.md b/api.md index c6acd4ec..8682940b 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,35 @@ # Shared Types ```python -from gradientai.types import APILinks, APIMeta, ChatCompletionChunk, ChatCompletionTokenLogprob +from gradientai.types import ( + Action, + ActionLink, + APILinks, + APIMeta, + BackwardLinks, + ChatCompletionChunk, + ChatCompletionTokenLogprob, + CompletionUsage, + DiskInfo, + Droplet, + DropletNextBackupWindow, + FirewallRuleTarget, + ForwardLinks, + GarbageCollection, + GPUInfo, + Image, + Kernel, + MetaProperties, + NetworkV4, + NetworkV6, + PageLinks, + Region, + Size, + Snapshots, + Subscription, + SubscriptionTierBase, + VpcPeering, +) ``` # Agents @@ -77,12 +105,16 @@ Methods: Types: ```python -from gradientai.types.agents import EvaluationMetricListResponse +from gradientai.types.agents import ( + EvaluationMetricListResponse, + EvaluationMetricListRegionsResponse, +) ``` Methods: - client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### Workspaces @@ -124,6 +156,18 @@ Methods: - client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse - client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse +### Models + +Types: + +```python +from gradientai.types.agents.evaluation_metrics import ModelListResponse +``` + +Methods: + +- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse + ## EvaluationRuns Types: @@ -145,7 +189,7 @@ Methods: - client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse - client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse - client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> 
EvaluationRunRetrieveResultsResponse ## EvaluationTestCases @@ -168,7 +212,7 @@ Methods: - client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse - client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse - client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse - client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse @@ -277,7 +321,7 @@ from gradientai.types import RegionListResponse Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases @@ -379,12 +423,19 @@ Methods: Types: ```python -from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse +from gradientai.types import ( + APIAgreement, + APIModel, + APIModelVersion, + ModelRetrieveResponse, + ModelListResponse, +) ``` Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -435,3 +486,392 @@ Methods: - client.models.providers.openai.list(\*\*params) -> OpenAIListResponse - client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse - client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse + +# GPUDroplets + +Types: + +```python +from gradientai.types import ( + DropletBackupPolicy, + GPUDropletCreateResponse, + GPUDropletRetrieveResponse, + GPUDropletListResponse, + GPUDropletListFirewallsResponse, + GPUDropletListKernelsResponse, + GPUDropletListNeighborsResponse, + GPUDropletListSnapshotsResponse, +) +``` + +Methods: + +- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse +- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse +- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse +- client.gpu_droplets.delete(droplet_id) -> None +- client.gpu_droplets.delete_by_tag(\*\*params) -> None +- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse +- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse +- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse +- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse + +## Backups + +Types: + +```python +from gradientai.types.gpu_droplets import ( + BackupListResponse, + BackupListPoliciesResponse, + BackupListSupportedPoliciesResponse, + BackupRetrievePolicyResponse, +) +``` + +Methods: + +- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse +- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse +- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse +- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse + +## Actions + +Types: + +```python +from gradientai.types.gpu_droplets import ( + ActionRetrieveResponse, + ActionListResponse, + ActionBulkInitiateResponse, + ActionInitiateResponse, +) +``` + +Methods: + +- 
client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse +- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse +- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse + +## DestroyWithAssociatedResources + +Types: + +```python +from gradientai.types.gpu_droplets import ( + AssociatedResource, + DestroyedAssociatedResource, + DestroyWithAssociatedResourceListResponse, + DestroyWithAssociatedResourceCheckStatusResponse, +) +``` + +Methods: + +- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse +- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse +- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None +- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None + +## Autoscale + +Types: + +```python +from gradientai.types.gpu_droplets import ( + AutoscalePool, + AutoscalePoolDropletTemplate, + AutoscalePoolDynamicConfig, + AutoscalePoolStaticConfig, + CurrentUtilization, + AutoscaleCreateResponse, + AutoscaleRetrieveResponse, + AutoscaleUpdateResponse, + AutoscaleListResponse, + AutoscaleListHistoryResponse, + AutoscaleListMembersResponse, +) +``` + +Methods: + +- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse +- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse +- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse +- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse +- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse +- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse + +## Firewalls + +Types: + +```python +from gradientai.types.gpu_droplets import ( + Firewall, + FirewallCreateResponse, + FirewallRetrieveResponse, + FirewallUpdateResponse, + FirewallListResponse, +) +``` + +Methods: + +- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse +- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse +- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse +- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse +- client.gpu_droplets.firewalls.delete(firewall_id) -> None + +### Droplets + +Methods: + +- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None + +### Tags + +Methods: + +- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None + +### Rules + +Methods: + +- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None + +## FloatingIPs + +Types: + +```python +from gradientai.types.gpu_droplets import ( + FloatingIP, + 
FloatingIPCreateResponse, + FloatingIPRetrieveResponse, + FloatingIPListResponse, +) +``` + +Methods: + +- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse +- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse +- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse +- client.gpu_droplets.floating_ips.delete(floating_ip) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.floating_ips import ( + ActionCreateResponse, + ActionRetrieveResponse, + ActionListResponse, +) +``` + +Methods: + +- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse +- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse +- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse + +## Images + +Types: + +```python +from gradientai.types.gpu_droplets import ( + ImageCreateResponse, + ImageRetrieveResponse, + ImageUpdateResponse, + ImageListResponse, +) +``` + +Methods: + +- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse +- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse +- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse +- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse +- client.gpu_droplets.images.delete(image_id) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.images import ActionListResponse +``` + +Methods: + +- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action +- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action +- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse + +## LoadBalancers + +Types: + +```python +from gradientai.types.gpu_droplets import ( + Domains, + ForwardingRule, + GlbSettings, + HealthCheck, + LbFirewall, + LoadBalancer, + StickySessions, + LoadBalancerCreateResponse, + LoadBalancerRetrieveResponse, + LoadBalancerUpdateResponse, + LoadBalancerListResponse, +) +``` + +Methods: + +- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse +- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse +- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse +- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse +- client.gpu_droplets.load_balancers.delete(lb_id) -> None +- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None + +### Droplets + +Methods: + +- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None + +### ForwardingRules + +Methods: + +- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None + +## Sizes + +Types: + +```python +from gradientai.types.gpu_droplets import SizeListResponse +``` + +Methods: + +- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse + +## Snapshots + +Types: + +```python +from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse +``` + +Methods: + +- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse +- 
client.gpu_droplets.snapshots.delete(snapshot_id) -> None + +## Volumes + +Types: + +```python +from gradientai.types.gpu_droplets import ( + VolumeCreateResponse, + VolumeRetrieveResponse, + VolumeListResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse +- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse +- client.gpu_droplets.volumes.delete(volume_id) -> None +- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.volumes import ( + VolumeAction, + ActionRetrieveResponse, + ActionListResponse, + ActionInitiateByIDResponse, + ActionInitiateByNameResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse + +### Snapshots + +Types: + +```python +from gradientai.types.gpu_droplets.volumes import ( + SnapshotCreateResponse, + SnapshotRetrieveResponse, + SnapshotListResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None + +## Account + +### Keys + +Types: + +```python +from gradientai.types.gpu_droplets.account import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, +) +``` + +Methods: + +- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse +- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse +- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index c696258b..92229a05 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -32,12 +32,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, knowledge_bases + from .resources import chat, agents, models, regions, inference, gpu_droplets, knowledge_bases from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource + from .resources.gpu_droplets.gpu_droplets import GPUDropletsResource, AsyncGPUDropletsResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ @@ -161,6 +162,12 @@ def models(self) -> ModelsResource: return ModelsResource(self) + @cached_property + def gpu_droplets(self) -> GPUDropletsResource: + from .resources.gpu_droplets import GPUDropletsResource 
+ + return GPUDropletsResource(self) + @cached_property def with_raw_response(self) -> GradientAIWithRawResponse: return GradientAIWithRawResponse(self) @@ -404,6 +411,12 @@ def models(self) -> AsyncModelsResource: return AsyncModelsResource(self) + @cached_property + def gpu_droplets(self) -> AsyncGPUDropletsResource: + from .resources.gpu_droplets import AsyncGPUDropletsResource + + return AsyncGPUDropletsResource(self) + @cached_property def with_raw_response(self) -> AsyncGradientAIWithRawResponse: return AsyncGradientAIWithRawResponse(self) @@ -580,6 +593,12 @@ def models(self) -> models.ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._client.models) + @cached_property + def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse + + return GPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + class AsyncGradientAIWithRawResponse: _client: AsyncGradientAI @@ -623,6 +642,12 @@ def models(self) -> models.AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._client.models) + @cached_property + def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse + + return AsyncGPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + class GradientAIWithStreamedResponse: _client: GradientAI @@ -666,6 +691,12 @@ def models(self) -> models.ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._client.models) + @cached_property + def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse + + return GPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + class AsyncGradientAIWithStreamedResponse: _client: AsyncGradientAI @@ -709,6 +740,12 @@ def models(self) -> models.AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._client.models) + @cached_property + def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse + + return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + Client = GradientAI diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index e1ed4a00..fd6da608 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -40,6 +40,14 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) +from .gpu_droplets import ( + GPUDropletsResource, + AsyncGPUDropletsResource, + GPUDropletsResourceWithRawResponse, + AsyncGPUDropletsResourceWithRawResponse, + GPUDropletsResourceWithStreamingResponse, + AsyncGPUDropletsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -86,4 +94,10 @@ "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", + "GPUDropletsResource", + "AsyncGPUDropletsResource", + "GPUDropletsResourceWithRawResponse", + "AsyncGPUDropletsResourceWithRawResponse", + "GPUDropletsResourceWithStreamingResponse", + "AsyncGPUDropletsResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 200e9fc0..92d696ba 
100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -203,13 +203,29 @@ def create( body contains a JSON object with the newly created agent object. Args: + anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models + + description: A text description of the agent, not used in inference + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent + model_uuid: Identifier for the foundation model. + name: Agent name + + openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + region: The DigitalOcean region to deploy your agent in + + tags: Agent tag to organize related resources + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -285,6 +301,7 @@ def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -312,17 +329,39 @@ def update( response body is a JSON object containing the agent. Args: + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models + + conversation_logs_enabled: Optional update of conversation logs enabled + + description: Agent description + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + k: How many results should be considered from an attached knowledge base + max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. + name: Agent name + + openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + + tags: A set of abitrary tags to organize your agent + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -331,6 +370,8 @@ def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. 
+ body_uuid: Unique agent id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -348,6 +389,7 @@ def update( body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, + "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -388,11 +430,11 @@ def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: only list agents that are deployed. + only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -477,6 +519,17 @@ def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. Args: + body_uuid: Unique id + + visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -591,13 +644,29 @@ async def create( body contains a JSON object with the newly created agent object. Args: + anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models + + description: A text description of the agent, not used in inference + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent + model_uuid: Identifier for the foundation model. + name: Agent name + + openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + region: The DigitalOcean region to deploy your agent in + + tags: Agent tag to organize related resources + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -673,6 +742,7 @@ async def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -700,17 +770,39 @@ async def update( response body is a JSON object containing the agent. Args: + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models + + conversation_logs_enabled: Optional update of conversation logs enabled + + description: Agent description + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + k: How many results should be considered from an attached knowledge base + max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. 
+ name: Agent name + + openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + + tags: A set of arbitrary tags to organize your agent + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -719,6 +811,8 @@ async def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. + body_uuid: Unique agent id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -736,6 +830,7 @@ async def update( body=await async_maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, + "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -776,11 +871,11 @@ async def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: only list agents that are deployed. + only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -865,6 +960,17 @@ async def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. Args: + body_uuid: Unique id + + visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 1cf2278e..9f4d9660 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -63,6 +63,10 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/api_keys`.
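The agents.update() additions above are easiest to see in use. A minimal sketch, assuming a configured client and a placeholder agent UUID (the identifiers and tag values below are illustrative, not real):

    from gradientai import GradientAI

    client = GradientAI()  # assumes API credentials are supplied via the environment

    # Enable the newly added conversation-logs flag and retag the agent in one PUT.
    agent = client.agents.update(
        path_uuid="example-agent-uuid",  # placeholder UUID
        conversation_logs_enabled=True,
        tags=["support", "production"],
    )
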
Args: + body_agent_uuid: Agent id + + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -356,6 +370,12 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. Args: + body_agent_uuid: Agent id + + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -404,9 +424,9 @@ async def list( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 4ec70d30..bc6f3084 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -64,6 +64,8 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -136,6 +138,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -177,6 +192,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -249,6 +266,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. 
+ + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -290,6 +320,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -362,6 +394,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -403,6 +448,8 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -433,6 +480,8 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -488,6 +537,8 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -560,6 +611,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. 
`auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -601,6 +665,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -673,6 +739,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -714,6 +793,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -786,6 +867,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. 
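The new tool_choice/tools parameters follow the function-tool shape quoted in the docstring. A hedged sketch against the agent chat completions resource; the model name and the get_weather schema are illustrative assumptions, and the exact Tool type lives in completion_create_params:

    completion = client.agents.chat.completions.create(
        model="example-model",  # placeholder model identifier
        messages=[{"role": "user", "content": "What's the weather in Berlin?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",  # hypothetical function
                    "description": "Look up the current weather for a city",
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
        tool_choice="auto",  # or pin one tool: {"type": "function", "function": {"name": "get_weather"}}
    )

With tools present, `auto` is already the default, so passing tool_choice here is only for emphasis.
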
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -827,6 +921,8 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -857,6 +953,8 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py index 1c0ec1ea..ce687621 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/resources/agents/evaluation_metrics/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) from .workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -24,6 +32,12 @@ "AsyncWorkspacesResourceWithRawResponse", "WorkspacesResourceWithStreamingResponse", "AsyncWorkspacesResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py index ce549527..edf708df 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -4,7 +4,16 @@ import httpx +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import ( @@ -14,6 +23,7 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options +from ....types.agents import evaluation_metric_list_regions_params from .workspaces.workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -23,6 +33,7 @@ AsyncWorkspacesResourceWithStreamingResponse, ) from ....types.agents.evaluation_metric_list_response import EvaluationMetricListResponse +from ....types.agents.evaluation_metric_list_regions_response import EvaluationMetricListRegionsResponse __all__ = 
["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"] @@ -32,6 +43,10 @@ class EvaluationMetricsResource(SyncAPIResource): def workspaces(self) -> WorkspacesResource: return WorkspacesResource(self._client) + @cached_property + def models(self) -> ModelsResource: + return ModelsResource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -75,12 +90,64 @@ def list( cast_to=EvaluationMetricListResponse, ) + def list_regions( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListRegionsResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: Include datacenters that are capable of running batch jobs. + + serves_inference: Include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, + ), + ), + cast_to=EvaluationMetricListRegionsResponse, + ) + class AsyncEvaluationMetricsResource(AsyncAPIResource): @cached_property def workspaces(self) -> AsyncWorkspacesResource: return AsyncWorkspacesResource(self._client) + @cached_property + def models(self) -> AsyncModelsResource: + return AsyncModelsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -124,6 +191,54 @@ async def list( cast_to=EvaluationMetricListResponse, ) + async def list_regions( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListRegionsResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: Include datacenters that are capable of running batch jobs. + + serves_inference: Include datacenters that serve inference. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, + ), + ), + cast_to=EvaluationMetricListRegionsResponse, + ) + class EvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -132,11 +247,18 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_raw_response_wrapper( evaluation_metrics.list, ) + self.list_regions = to_raw_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> WorkspacesResourceWithRawResponse: return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> ModelsResourceWithRawResponse: + return ModelsResourceWithRawResponse(self._evaluation_metrics.models) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -145,11 +267,18 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_raw_response_wrapper( evaluation_metrics.list, ) + self.list_regions = async_to_raw_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse: return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> AsyncModelsResourceWithRawResponse: + return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -158,11 +287,18 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_streamed_response_wrapper( evaluation_metrics.list, ) + self.list_regions = to_streamed_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> WorkspacesResourceWithStreamingResponse: return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> ModelsResourceWithStreamingResponse: + return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -171,7 +307,14 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_streamed_response_wrapper( evaluation_metrics.list, ) + self.list_regions = async_to_streamed_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse: return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) + + @cached_property + def models(self) -> 
AsyncModelsResourceWithStreamingResponse: + return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models) diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/gradientai/resources/agents/evaluation_metrics/models.py new file mode 100644 index 00000000..20a44a22 --- /dev/null +++ b/src/gradientai/resources/agents/evaluation_metrics/models.py @@ -0,0 +1,254 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.agents.evaluation_metrics import model_list_params +from ....types.agents.evaluation_metrics.model_list_response import ModelListResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ModelsResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), + ), + cast_to=ModelListResponse, + ) + + +class AsyncModelsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncModelsResourceWithStreamingResponse(self) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases.
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), + ), + cast_to=ModelListResponse, + ) + + +class ModelsResourceWithRawResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.list = to_raw_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithRawResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.list = async_to_raw_response_wrapper( + models.list, + ) + + +class ModelsResourceWithStreamingResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.list = to_streamed_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithStreamingResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.list = async_to_streamed_response_wrapper( + models.list, + ) diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py index 1e11739f..a5e68a45 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py @@ -48,7 +48,6 @@ def list( self, workspace_uuid: str, *, - field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -66,9 +65,9 @@ def list( Args: only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -91,7 +90,6 @@ def list( timeout=timeout, query=maybe_transform( { - "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -116,10 +114,14 @@ def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agetns a given workspace, send a PUT request to + To move all listed agents to a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
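The evaluation-metrics models listing introduced above supports pagination plus the usecase filters enumerated in its docstring. A sketch with illustrative filter values:

    models = client.agents.evaluation_metrics.models.list(
        public_only=True,
        usecases=["MODEL_USECASE_AGENT"],  # only models usable by agents
        page=1,
        per_page=50,
    )
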
Args: + agent_uuids: Agent uuids + + body_workspace_uuid: Workspace uuid to move agents to + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -174,7 +176,6 @@ async def list( self, workspace_uuid: str, *, - field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -192,9 +193,9 @@ async def list( Args: only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -217,7 +218,6 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -242,10 +242,14 @@ async def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agetns a given workspace, send a PUT request to + To move all listed agents to a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. Args: + agent_uuids: Agent uuids + + body_workspace_uuid: Workspace uuid to move agents to + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py index 0f506118..cb213e1d 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -81,6 +81,12 @@ def create( response body contains a JSON object with the newly created workspace object. Args: + agent_uuids: Ids of the agent(s) to attach to the workspace + + description: Description of the workspace + + name: Name of the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -164,6 +170,10 @@ def update( containing the workspace. Args: + description: The new description of the workspace + + name: The new name of the workspace + body_workspace_uuid: Workspace UUID. extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -333,6 +343,12 @@ async def create( response body contains a JSON object with the newly created workspace object. Args: + agent_uuids: Ids of the agent(s) to attach to the workspace + + description: Description of the workspace + + name: Name of the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -416,6 +432,10 @@ async def update( containing the workspace. Args: + description: The new description of the workspace + + name: The new name of the workspace + body_workspace_uuid: Workspace UUID.
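A sketch of the workspace creation fields documented above; the agent UUID is a placeholder:

    workspace = client.agents.evaluation_metrics.workspaces.create(
        agent_uuids=["example-agent-uuid"],  # placeholder
        description="Workspace for evaluation runs",
        name="eval-workspace",
    )
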
extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py index 47045132..c5ea2520 100644 --- a/src/gradientai/resources/agents/evaluation_runs.py +++ b/src/gradientai/resources/agents/evaluation_runs.py @@ -17,7 +17,7 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import evaluation_run_create_params +from ...types.agents import evaluation_run_create_params, evaluation_run_list_results_params from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse @@ -68,6 +68,8 @@ def create( run_name: The name of the run. + test_case_uuid: Test-case UUID to run + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -136,6 +138,8 @@ def list_results( self, evaluation_run_uuid: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -148,6 +152,10 @@ def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. Args: + page: Page number. + + per_page: Items per page. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -165,7 +173,17 @@ def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + evaluation_run_list_results_params.EvaluationRunListResultsParams, + ), ), cast_to=EvaluationRunListResultsResponse, ) @@ -252,6 +270,8 @@ async def create( run_name: The name of the run. + test_case_uuid: Test-case UUID to run + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -320,6 +340,8 @@ async def list_results( self, evaluation_run_uuid: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -332,6 +354,10 @@ async def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. Args: + page: Page number. + + per_page: Items per page. 
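The results endpoint now takes the pagination parameters documented above. A sketch, with a placeholder run UUID:

    results = client.agents.evaluation_runs.list_results(
        "example-evaluation-run-uuid",  # placeholder
        page=1,
        per_page=20,
    )
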
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -349,7 +375,17 @@ async def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + evaluation_run_list_results_params.EvaluationRunListResultsParams, + ), ), cast_to=EvaluationRunListResultsResponse, ) diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py index beff8752..e33f9f91 100644 --- a/src/gradientai/resources/agents/evaluation_test_cases.py +++ b/src/gradientai/resources/agents/evaluation_test_cases.py @@ -179,7 +179,7 @@ def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a POST request to + To update an evaluation test-case send a PUT request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -189,6 +189,8 @@ def update( name: Name of the test case. + body_test_case_uuid: Test-case UUID to update + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -201,7 +203,7 @@ def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return self._post( + return self._put( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", @@ -439,7 +441,7 @@ async def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a POST request to + To update an evaluation test-case send a PUT request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -449,6 +451,8 @@ async def update( name: Name of the test case. + body_test_case_uuid: Test-case UUID to update + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -461,7 +465,7 @@ async def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return await self._post( + return await self._put( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 8c5f3f49..1c5b2015 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -66,6 +66,20 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. 
Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent may handle its response + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -123,6 +137,22 @@ def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + body_function_uuid: Function id + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent may handle its response + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -241,6 +271,20 @@ async def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent may handle its response + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -298,6 +342,22 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + body_function_uuid: Function id + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent may handle its response + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py index ed25d795..a7a298f2 100644 --- a/src/gradientai/resources/agents/routes.py +++ b/src/gradientai/resources/agents/routes.py @@ -66,8 +66,16 @@ def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args:
+ route_name: Name of route + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -280,8 +292,16 @@ async def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + + if_case: Describes the case in which the child agent should be used + body_parent_agent_uuid: A unique identifier for the parent agent. + route_name: Route name + + uuid: Unique id of linkage + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -378,8 +398,12 @@ async def add( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + body_parent_agent_uuid: A unique identifier for the parent agent. + route_name: Name of route + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index 65a35472..77eabea9 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -60,6 +60,10 @@ def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: + body_uuid: Agent unique identifier + + version_hash: Unique identifier + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -105,9 +109,9 @@ def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -178,6 +182,10 @@ async def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: + body_uuid: Agent unique identifier + + version_hash: Unique identifier + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -223,9 +231,9 @@ async def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index 6847cfc5..a0545173 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -64,6 +64,8 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -136,6 +138,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. 
Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -177,6 +192,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -249,6 +266,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -290,6 +320,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -362,6 +394,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
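The same tool_choice/tools additions land on the top-level chat resource, so the earlier agent example carries over with the resource path swapped. A minimal sketch that explicitly opts out of tool calls:

    completion = client.chat.completions.create(
        model="example-model",  # placeholder
        messages=[{"role": "user", "content": "Summarize this repo in one line."}],
        tool_choice="none",  # never call a tool, even if tools were supplied
    )
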
@@ -403,6 +448,8 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -433,6 +480,8 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -488,6 +537,8 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -560,6 +611,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -601,6 +665,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -673,6 +739,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. 
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -714,6 +793,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -786,6 +867,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -827,6 +921,8 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -857,6 +953,8 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/gpu_droplets/__init__.py b/src/gradientai/resources/gpu_droplets/__init__.py new file mode 100644 index 00000000..064a36ce --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/__init__.py @@ -0,0 +1,187 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .sizes import ( + SizesResource, + AsyncSizesResource, + SizesResourceWithRawResponse, + AsyncSizesResourceWithRawResponse, + SizesResourceWithStreamingResponse, + AsyncSizesResourceWithStreamingResponse, +) +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .backups import ( + BackupsResource, + AsyncBackupsResource, + BackupsResourceWithRawResponse, + AsyncBackupsResourceWithRawResponse, + BackupsResourceWithStreamingResponse, + AsyncBackupsResourceWithStreamingResponse, +) +from .volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .autoscale import ( + AutoscaleResource, + AsyncAutoscaleResource, + AutoscaleResourceWithRawResponse, + AsyncAutoscaleResourceWithRawResponse, + AutoscaleResourceWithStreamingResponse, + AsyncAutoscaleResourceWithStreamingResponse, +) +from .firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from .floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) +from .gpu_droplets import ( + GPUDropletsResource, + AsyncGPUDropletsResource, + GPUDropletsResourceWithRawResponse, + AsyncGPUDropletsResourceWithRawResponse, + GPUDropletsResourceWithStreamingResponse, + AsyncGPUDropletsResourceWithStreamingResponse, +) +from .load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from .destroy_with_associated_resources import ( + DestroyWithAssociatedResourcesResource, + AsyncDestroyWithAssociatedResourcesResource, + DestroyWithAssociatedResourcesResourceWithRawResponse, + AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, + DestroyWithAssociatedResourcesResourceWithStreamingResponse, + AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, +) + +__all__ = [ + "BackupsResource", + "AsyncBackupsResource", + "BackupsResourceWithRawResponse", + "AsyncBackupsResourceWithRawResponse", + "BackupsResourceWithStreamingResponse", + "AsyncBackupsResourceWithStreamingResponse", + "ActionsResource", + "AsyncActionsResource", + 
"ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "DestroyWithAssociatedResourcesResource", + "AsyncDestroyWithAssociatedResourcesResource", + "DestroyWithAssociatedResourcesResourceWithRawResponse", + "AsyncDestroyWithAssociatedResourcesResourceWithRawResponse", + "DestroyWithAssociatedResourcesResourceWithStreamingResponse", + "AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse", + "AutoscaleResource", + "AsyncAutoscaleResource", + "AutoscaleResourceWithRawResponse", + "AsyncAutoscaleResourceWithRawResponse", + "AutoscaleResourceWithStreamingResponse", + "AsyncAutoscaleResourceWithStreamingResponse", + "FirewallsResource", + "AsyncFirewallsResource", + "FirewallsResourceWithRawResponse", + "AsyncFirewallsResourceWithRawResponse", + "FirewallsResourceWithStreamingResponse", + "AsyncFirewallsResourceWithStreamingResponse", + "FloatingIPsResource", + "AsyncFloatingIPsResource", + "FloatingIPsResourceWithRawResponse", + "AsyncFloatingIPsResourceWithRawResponse", + "FloatingIPsResourceWithStreamingResponse", + "AsyncFloatingIPsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", + "LoadBalancersResource", + "AsyncLoadBalancersResource", + "LoadBalancersResourceWithRawResponse", + "AsyncLoadBalancersResourceWithRawResponse", + "LoadBalancersResourceWithStreamingResponse", + "AsyncLoadBalancersResourceWithStreamingResponse", + "SizesResource", + "AsyncSizesResource", + "SizesResourceWithRawResponse", + "AsyncSizesResourceWithRawResponse", + "SizesResourceWithStreamingResponse", + "AsyncSizesResourceWithStreamingResponse", + "SnapshotsResource", + "AsyncSnapshotsResource", + "SnapshotsResourceWithRawResponse", + "AsyncSnapshotsResourceWithRawResponse", + "SnapshotsResourceWithStreamingResponse", + "AsyncSnapshotsResourceWithStreamingResponse", + "VolumesResource", + "AsyncVolumesResource", + "VolumesResourceWithRawResponse", + "AsyncVolumesResourceWithRawResponse", + "VolumesResourceWithStreamingResponse", + "AsyncVolumesResourceWithStreamingResponse", + "AccountResource", + "AsyncAccountResource", + "AccountResourceWithRawResponse", + "AsyncAccountResourceWithRawResponse", + "AccountResourceWithStreamingResponse", + "AsyncAccountResourceWithStreamingResponse", + "GPUDropletsResource", + "AsyncGPUDropletsResource", + "GPUDropletsResourceWithRawResponse", + "AsyncGPUDropletsResourceWithRawResponse", + "GPUDropletsResourceWithStreamingResponse", + "AsyncGPUDropletsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/account/__init__.py b/src/gradientai/resources/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..33286c3f --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AccountResource", + "AsyncAccountResource", + "AccountResourceWithRawResponse", + "AsyncAccountResourceWithRawResponse", + "AccountResourceWithStreamingResponse", + "AsyncAccountResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/account/account.py b/src/gradientai/resources/gpu_droplets/account/account.py new file mode 100644 index 00000000..d61fb68b --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/account.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AccountResource", "AsyncAccountResource"] + + +class AccountResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AccountResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AccountResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AccountResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AccountResourceWithStreamingResponse(self) + + +class AsyncAccountResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAccountResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAccountResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAccountResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAccountResourceWithStreamingResponse(self) + + +class AccountResourceWithRawResponse: + def __init__(self, account: AccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._account.keys) + + +class AsyncAccountResourceWithRawResponse: + def __init__(self, account: AsyncAccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._account.keys) + + +class AccountResourceWithStreamingResponse: + def __init__(self, account: AccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._account.keys) + + +class AsyncAccountResourceWithStreamingResponse: + def __init__(self, account: AsyncAccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._account.keys) diff --git a/src/gradientai/resources/gpu_droplets/account/keys.py b/src/gradientai/resources/gpu_droplets/account/keys.py new file mode 100644 index 00000000..66d3bd55 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/keys.py @@ -0,0 +1,588 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.account import key_list_params, key_create_params, key_update_params +from ....types.gpu_droplets.account.key_list_response import KeyListResponse +from ....types.gpu_droplets.account.key_create_response import KeyCreateResponse +from ....types.gpu_droplets.account.key_update_response import KeyUpdateResponse +from ....types.gpu_droplets.account.key_retrieve_response import KeyRetrieveResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str, + public_key: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To add a new SSH public key to your DigitalOcean account, send a POST request to + `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the + `public_key` attribute to the full public key you are adding. + + Args: + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + public_key: The entire public key string that was uploaded. Embedded into the root user's + `authorized_keys` file if you include this key during Droplet creation. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + body=maybe_transform( + { + "name": name, + "public_key": public_key, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` + or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with + the key `ssh_key` and value an ssh_key object which contains the standard + ssh_key attributes. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + ssh_key_identifier: Union[int, str], + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update the name of an SSH key, send a PUT request to either + `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set + the `name` attribute to the new name you want to use. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._put( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + body=maybe_transform({"name": name}, key_update_params.KeyUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all of the keys in your account, send a GET request to + `/v2/account/keys`. The response will be a JSON object with a key set to + `ssh_keys`. The value of this will be an array of ssh_key objects, each of which + contains the standard ssh_key attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a public SSH key that you have in your account, send a DELETE request + to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 + status will be returned, indicating that the action was successful and that the + response body is empty. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str, + public_key: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To add a new SSH public key to your DigitalOcean account, send a POST request to + `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the + `public_key` attribute to the full public key you are adding. + + Args: + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + public_key: The entire public key string that was uploaded. Embedded into the root user's + `authorized_keys` file if you include this key during Droplet creation. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + body=await async_maybe_transform( + { + "name": name, + "public_key": public_key, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` + or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with + the key `ssh_key` and value an ssh_key object which contains the standard + ssh_key attributes. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + ssh_key_identifier: Union[int, str], + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update the name of an SSH key, send a PUT request to either + `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set + the `name` attribute to the new name you want to use. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._put( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + body=await async_maybe_transform({"name": name}, key_update_params.KeyUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all of the keys in your account, send a GET request to + `/v2/account/keys`. The response will be a JSON object with a key set to + `ssh_keys`. The value of this will be an array of ssh_key objects, each of which + contains the standard ssh_key attributes. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a public SSH key that you have in your account, send a DELETE request + to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 + status will be returned, indicating that the action was successful and that the + response body is empty. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/actions.py b/src/gradientai/resources/gpu_droplets/actions.py new file mode 100644 index 00000000..197b2ce7 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/actions.py @@ -0,0 +1,2048 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
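(Usage sketch, not part of the patch, before the actions resource that follows: the `GradientAI` entry point and the `client.gpu_droplets.account.keys` attribute path are assumptions inferred from the module layout, as is `.parse()` on the raw-response wrapper.)

# Illustrative sketch only; client entry point and resource path are assumed.
from gradientai import GradientAI

client = GradientAI()

# Plain call: returns the parsed KeyCreateResponse model.
created = client.gpu_droplets.account.keys.create(
    name="laptop",
    public_key="ssh-ed25519 AAAA... user@laptop",
)

# The same call through with_raw_response returns the HTTP response object
# (headers, status code), with the parsed model still available via .parse().
raw = client.gpu_droplets.account.keys.with_raw_response.create(
    name="laptop",
    public_key="ssh-ed25519 AAAA... user@laptop",
)
key = raw.parse()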
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, overload + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import action_list_params, action_initiate_params, action_bulk_initiate_params +from ...types.droplet_backup_policy_param import DropletBackupPolicyParam +from ...types.gpu_droplets.action_list_response import ActionListResponse +from ...types.gpu_droplets.action_initiate_response import ActionInitiateResponse +from ...types.gpu_droplets.action_retrieve_response import ActionRetrieveResponse +from ...types.gpu_droplets.action_bulk_initiate_response import ActionBulkInitiateResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + def retrieve( + self, + action_id: int, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve a Droplet action, send a GET request to + `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. + + The response will be a JSON object with a key called `action`. The value will be + a Droplet action object. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve a list of all actions that have been executed for a Droplet, send a + GET request to `/v2/droplets/$DROPLET_ID/actions`. + + The results will be returned as a JSON object with an `actions` key. This will + be set to an array filled with `action` objects containing the standard `action` + attributes. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. + + Only a subset of action types is supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Cannot be combined with `name` or + `type`. Requires `tag:read` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ...
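(Usage sketch, not part of the patch: assuming the same `GradientAI` entry point and a `client.gpu_droplets.actions` path, the tag-based bulk action described in the overload above might be invoked as follows; the `initiate` overloads later in this file follow the same shape for a single Droplet.)

# Illustrative sketch only; client entry point and resource path are assumed.
from gradientai import GradientAI

client = GradientAI()

# Power off every Droplet carrying the "staging" tag in one request.
actions = client.gpu_droplets.actions.bulk_initiate(
    type="power_off",
    tag_name="staging",
)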
+ + @overload + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. + + Only a subset of action types is supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Cannot be combined with `name` or + `type`. Requires `tag:read` scope. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"]) + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + return self._post( + "/v2/droplets/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/actions", + body=maybe_transform( + { + "type": type, + "name": name, + }, + action_bulk_initiate_params.ActionBulkInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams), + ), + cast_to=ActionBulkInitiateResponse, + ) + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; the effect is similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet.
| | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console.
| | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; the effect is similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup + plan will default to daily. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`.
In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; the effect is similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. 
If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The ID of a backup of the current Droplet instance to restore from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. 
Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. + + size: The slug identifier for the size to which you wish to resize the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: Union[str, int] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. 
If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The new name for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. 
The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + kernel: A unique number used to identify and reference a specific kernel. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
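+
+    # Usage sketch (illustrative only): how a caller might invoke this method.
+    # The accessor path `client.droplets.actions` and all example values below
+    # are assumptions for demonstration, not part of the generated client.
+    #
+    #     client.droplets.actions.initiate(droplet_id=3164444, type="reboot")
+    #
+    #     # Resize, permanently growing the disk as described above:
+    #     client.droplets.actions.initiate(
+    #         droplet_id=3164444,
+    #         type="resize",
+    #         size="s-2vcpu-2gb",
+    #         disk=True,
+    #     )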
+ + @required_args(["type"]) + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + image: int | Union[str, int] | NotGiven = NOT_GIVEN, + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + return self._post( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + body=maybe_transform( + { + "type": type, + "backup_policy": backup_policy, + "image": image, + "disk": disk, + "size": size, + "name": name, + "kernel": kernel, + }, + action_initiate_params.ActionInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionInitiateResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + async def retrieve( + self, + action_id: int, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve a Droplet action, send a GET request to + `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. + + The response will be a JSON object with a key called `action`. The value will be + a Droplet action object. 
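+
+        A minimal usage sketch (illustrative; the `client.droplets.actions`
+        accessor path and the IDs are assumptions):
+
+            action = await client.droplets.actions.retrieve(
+                action_id=36804636,
+                droplet_id=3164444,
+            )
+            print(action.action)  # the response's `action` key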
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve a list of all actions that have been executed for a Droplet, send a + GET request to `/v2/droplets/$DROPLET_ID/actions`. + + The results will be returned as a JSON object with an `actions` key. This will + be set to an array filled with `action` objects containing the standard `action` + attributes. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. 
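+
+        For example, a sketch that snapshots every Droplet carrying a given tag
+        (the `client.droplets.actions` accessor path and the tag value are
+        assumptions):
+
+            await client.droplets.actions.bulk_initiate(
+                type="snapshot",
+                tag_name="production",
+                name="pre-upgrade snapshot",
+            )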
+ + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. + + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"]) + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + return await self._post( + "/v2/droplets/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/actions", + body=await async_maybe_transform( + { + "type": type, + "name": name, + }, + action_bulk_initiate_params.ActionBulkInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams + ), + ), + cast_to=ActionBulkInitiateResponse, + ) + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. 
| | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. 
| | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup + plan will default to daily. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. 
If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The ID of a backup of the current Droplet instance to restore from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. 
Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restores a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. + + size: The slug identifier for the size to which you wish to resize the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: Union[str, int] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`.
In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ------------------------------------------------------------ | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Updates the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; it's similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restores a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ------------------------------------------------------------ | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Updates the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; it's similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restores a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug.
If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The new name for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ------------------------------------------------------------ | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Updates the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; it's similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds.
The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restores a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + kernel: A unique number used to identify and reference a specific kernel. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet, send a POST request to + `/v2/droplets/$DROPLET_ID/actions`.
In the JSON body of the request, set the + `type` attribute to one of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ------------------------------------------------------------ | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Updates the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `power_cycle` action is similar to pushing the reset button on a physical machine; it's similar to booting from scratch. | | + | `shutdown` | Shuts down a Droplet. A shutdown action is an attempt to shut down the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restores a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 cannot be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ...
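+ + # Usage sketch (assumptions, not generated code: a configured async client that + # exposes this resource as `client.gpu_droplets.actions`; the Droplet ID and + # size slug below are illustrative): + # + #     action = await client.gpu_droplets.actions.initiate( + #         droplet_id=3164444, + #         type="resize", + #         size="s-2vcpu-2gb", + #         disk=True,  # also resize the disk; this change is permanent + #     )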
+ + @required_args(["type"]) + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + image: int | Union[str, int] | NotGiven = NOT_GIVEN, + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + return await self._post( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + body=await async_maybe_transform( + { + "type": type, + "backup_policy": backup_policy, + "image": image, + "disk": disk, + "size": size, + "name": name, + "kernel": kernel, + }, + action_initiate_params.ActionInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionInitiateResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + self.bulk_initiate = to_raw_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = to_raw_response_wrapper( + actions.initiate, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + self.bulk_initiate = async_to_raw_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = async_to_raw_response_wrapper( + actions.initiate, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + self.bulk_initiate = to_streamed_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = to_streamed_response_wrapper( + actions.initiate, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) + self.bulk_initiate = async_to_streamed_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = async_to_streamed_response_wrapper( + actions.initiate, + ) diff --git a/src/gradientai/resources/gpu_droplets/autoscale.py b/src/gradientai/resources/gpu_droplets/autoscale.py new file mode 100644 index 00000000..a1a72430 --- /dev/null +++ 
b/src/gradientai/resources/gpu_droplets/autoscale.py @@ -0,0 +1,967 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import ( + autoscale_list_params, + autoscale_create_params, + autoscale_update_params, + autoscale_list_history_params, + autoscale_list_members_params, +) +from ...types.gpu_droplets.autoscale_list_response import AutoscaleListResponse +from ...types.gpu_droplets.autoscale_create_response import AutoscaleCreateResponse +from ...types.gpu_droplets.autoscale_update_response import AutoscaleUpdateResponse +from ...types.gpu_droplets.autoscale_retrieve_response import AutoscaleRetrieveResponse +from ...types.gpu_droplets.autoscale_list_history_response import AutoscaleListHistoryResponse +from ...types.gpu_droplets.autoscale_list_members_response import AutoscaleListMembersResponse +from ...types.gpu_droplets.autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleResource", "AsyncAutoscaleResource"] + + +class AutoscaleResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AutoscaleResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AutoscaleResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AutoscaleResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AutoscaleResourceWithStreamingResponse(self) + + def create( + self, + *, + config: autoscale_create_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleCreateResponse: + """ + To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` + setting the required attributes. + + The response body will contain a JSON object with a key called `autoscale_pool` + containing the standard attributes for the new autoscale pool. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + body=maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_create_params.AutoscaleCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleCreateResponse, + ) + + def retrieve( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleRetrieveResponse: + """ + To show information about an individual autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleRetrieveResponse, + ) + + def update( + self, + autoscale_pool_id: str, + *, + config: autoscale_update_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleUpdateResponse: + """ + To update the configuration of an existing autoscale pool, send a PUT request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full + representation of the autoscale pool including existing attributes. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._put( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + body=maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_update_params.AutoscaleUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleUpdateResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListResponse: + """ + To list all autoscale pools in your team, send a GET request to + `/v2/droplets/autoscale`. The response body will be a JSON object with a key of + `autoscale_pools` containing an array of autoscale pool objects. These each + contain the standard autoscale pool attributes. + + Args: + name: The name of the autoscale pool + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + }, + autoscale_list_params.AutoscaleListParams, + ), + ), + cast_to=AutoscaleListResponse, + ) + + def delete( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool, send a DELETE request to the + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. + + A successful response will include a 202 response code and no content. 
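+ + A minimal usage sketch (assuming the client exposes this resource as + `client.gpu_droplets.autoscale`; the pool ID is illustrative): + + client.gpu_droplets.autoscale.delete("0d3db13e-a604-4944-9827-7ec2642d32ac")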
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_dangerous( + self, + autoscale_pool_id: str, + *, + x_dangerous: bool, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool and its associated resources (Droplets), send a + DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` + endpoint. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def list_history( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListHistoryResponse: + """ + To list all of the scaling history events of an autoscale pool, send a GET + request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. + + The response body will be a JSON object with a key of `history`. This will be + set to an array containing objects each representing a history event. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/history" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_history_params.AutoscaleListHistoryParams, + ), + ), + cast_to=AutoscaleListHistoryResponse, + ) + + def list_members( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListMembersResponse: + """ + To list the Droplets in an autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing information about each of the Droplets in the + autoscale pool. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/members" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_members_params.AutoscaleListMembersParams, + ), + ), + cast_to=AutoscaleListMembersResponse, + ) + + +class AsyncAutoscaleResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAutoscaleResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAutoscaleResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAutoscaleResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAutoscaleResourceWithStreamingResponse(self) + + async def create( + self, + *, + config: autoscale_create_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleCreateResponse: + """ + To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` + setting the required attributes. + + The response body will contain a JSON object with a key called `autoscale_pool` + containing the standard attributes for the new autoscale pool. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + body=await async_maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_create_params.AutoscaleCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleCreateResponse, + ) + + async def retrieve( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleRetrieveResponse: + """ + To show information about an individual autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. 
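+ + A minimal async usage sketch (assuming the client exposes this resource as + `client.gpu_droplets.autoscale`; the pool ID is illustrative): + + pool = await client.gpu_droplets.autoscale.retrieve("0d3db13e-a604-4944-9827-7ec2642d32ac")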
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleRetrieveResponse, + ) + + async def update( + self, + autoscale_pool_id: str, + *, + config: autoscale_update_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleUpdateResponse: + """ + To update the configuration of an existing autoscale pool, send a PUT request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full + representation of the autoscale pool including existing attributes. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._put( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + body=await async_maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_update_params.AutoscaleUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleUpdateResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListResponse: + """ + To list all autoscale pools in your team, send a GET request to + `/v2/droplets/autoscale`. 
The response body will be a JSON object with a key of + `autoscale_pools` containing an array of autoscale pool objects. These each + contain the standard autoscale pool attributes. + + Args: + name: The name of the autoscale pool + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + }, + autoscale_list_params.AutoscaleListParams, + ), + ), + cast_to=AutoscaleListResponse, + ) + + async def delete( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool, send a DELETE request to the + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. + + A successful response will include a 202 response code and no content. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_dangerous( + self, + autoscale_pool_id: str, + *, + x_dangerous: bool, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool and its associated resources (Droplets), send a + DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` + endpoint. 
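+ + As a sketch (assuming the client exposes this resource as + `client.gpu_droplets.autoscale`; the pool ID is illustrative), passing + `x_dangerous=True` sends the `X-Dangerous: true` header this endpoint requires: + + await client.gpu_droplets.autoscale.delete_dangerous("0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True)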
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return await self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def list_history( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListHistoryResponse: + """ + To list all of the scaling history events of an autoscale pool, send a GET + request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. + + The response body will be a JSON object with a key of `history`. This will be + set to an array containing objects each representing a history event. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/history" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_history_params.AutoscaleListHistoryParams, + ), + ), + cast_to=AutoscaleListHistoryResponse, + ) + + async def list_members( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListMembersResponse: + """ + To list the Droplets in an autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. 
+ + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing information about each of the Droplets in the + autoscale pool. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/members" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_members_params.AutoscaleListMembersParams, + ), + ), + cast_to=AutoscaleListMembersResponse, + ) + + +class AutoscaleResourceWithRawResponse: + def __init__(self, autoscale: AutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = to_raw_response_wrapper( + autoscale.create, + ) + self.retrieve = to_raw_response_wrapper( + autoscale.retrieve, + ) + self.update = to_raw_response_wrapper( + autoscale.update, + ) + self.list = to_raw_response_wrapper( + autoscale.list, + ) + self.delete = to_raw_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = to_raw_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = to_raw_response_wrapper( + autoscale.list_history, + ) + self.list_members = to_raw_response_wrapper( + autoscale.list_members, + ) + + +class AsyncAutoscaleResourceWithRawResponse: + def __init__(self, autoscale: AsyncAutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = async_to_raw_response_wrapper( + autoscale.create, + ) + self.retrieve = async_to_raw_response_wrapper( + autoscale.retrieve, + ) + self.update = async_to_raw_response_wrapper( + autoscale.update, + ) + self.list = async_to_raw_response_wrapper( + autoscale.list, + ) + self.delete = async_to_raw_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = async_to_raw_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = async_to_raw_response_wrapper( + autoscale.list_history, + ) + self.list_members = async_to_raw_response_wrapper( + autoscale.list_members, + ) + + +class AutoscaleResourceWithStreamingResponse: + def __init__(self, autoscale: AutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = to_streamed_response_wrapper( + autoscale.create, + ) + self.retrieve = to_streamed_response_wrapper( + autoscale.retrieve, + ) + self.update = to_streamed_response_wrapper( + autoscale.update, + ) + self.list = to_streamed_response_wrapper( + autoscale.list, + ) + self.delete = to_streamed_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = to_streamed_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = to_streamed_response_wrapper( + autoscale.list_history, + ) + self.list_members = to_streamed_response_wrapper( + autoscale.list_members, + ) + + +class AsyncAutoscaleResourceWithStreamingResponse: + def __init__(self, autoscale: AsyncAutoscaleResource) -> None: + 
self._autoscale = autoscale + + self.create = async_to_streamed_response_wrapper( + autoscale.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + autoscale.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + autoscale.update, + ) + self.list = async_to_streamed_response_wrapper( + autoscale.list, + ) + self.delete = async_to_streamed_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = async_to_streamed_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = async_to_streamed_response_wrapper( + autoscale.list_history, + ) + self.list_members = async_to_streamed_response_wrapper( + autoscale.list_members, + ) diff --git a/src/gradientai/resources/gpu_droplets/backups.py b/src/gradientai/resources/gpu_droplets/backups.py new file mode 100644 index 00000000..06fca19e --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/backups.py @@ -0,0 +1,460 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import backup_list_params, backup_list_policies_params +from ...types.gpu_droplets.backup_list_response import BackupListResponse +from ...types.gpu_droplets.backup_list_policies_response import BackupListPoliciesResponse +from ...types.gpu_droplets.backup_retrieve_policy_response import BackupRetrievePolicyResponse +from ...types.gpu_droplets.backup_list_supported_policies_response import BackupListSupportedPoliciesResponse + +__all__ = ["BackupsResource", "AsyncBackupsResource"] + + +class BackupsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> BackupsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return BackupsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> BackupsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return BackupsResourceWithStreamingResponse(self) + + def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListResponse: + """ + To retrieve any backups associated with a Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/backups`. + + You will get back a JSON object that has a `backups` key. 
This will be set to an
+        array of backup objects, each of which contains the standard Droplet backup
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/backups"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    backup_list_params.BackupListParams,
+                ),
+            ),
+            cast_to=BackupListResponse,
+        )
+
+    def list_policies(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> BackupListPoliciesResponse:
+        """
+        To list information about the backup policies for all Droplets in the account,
+        send a GET request to `/v2/droplets/backups/policies`.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/droplets/backups/policies"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/droplets/backups/policies",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    backup_list_policies_params.BackupListPoliciesParams,
+                ),
+            ),
+            cast_to=BackupListPoliciesResponse,
+        )
+
+    def list_supported_policies(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> BackupListSupportedPoliciesResponse:
+        """
+        To retrieve a list of all supported Droplet backup policies, send a GET request
+        to `/v2/droplets/backups/supported_policies`.
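A minimal sketch of the synchronous backup calls above, assuming the sync client class is `GradientAI` and the resource is exposed as `gpu_droplets.backups` (both names inferred from the file paths; the Droplet ID is a made-up example).

```python
# Hypothetical usage sketch; names inferred from the package layout.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# Page through a Droplet's backups.
backups = client.gpu_droplets.backups.list(3164444, page=1, per_page=20)

# Discover which backup policies the platform currently supports.
supported = client.gpu_droplets.backups.list_supported_policies()
print(backups, supported)
```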
+
+        """
+        return self._get(
+            "/v2/droplets/backups/supported_policies"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/droplets/backups/supported_policies",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=BackupListSupportedPoliciesResponse,
+        )
+
+    def retrieve_policy(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> BackupRetrievePolicyResponse:
+        """
+        To show information about an individual Droplet's backup policy, send a GET
+        request to `/v2/droplets/$DROPLET_ID/backups/policy`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/backups/policy"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=BackupRetrievePolicyResponse,
+        )
+
+
+class AsyncBackupsResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncBackupsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncBackupsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncBackupsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncBackupsResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> BackupListResponse:
+        """
+        To retrieve any backups associated with a Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/backups`.
+
+        You will get back a JSON object that has a `backups` key. This will be set to an
+        array of backup objects, each of which contains the standard Droplet backup
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/backups" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_params.BackupListParams, + ), + ), + cast_to=BackupListResponse, + ) + + async def list_policies( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListPoliciesResponse: + """ + To list information about the backup policies for all Droplets in the account, + send a GET request to `/v2/droplets/backups/policies`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets/backups/policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/policies", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_policies_params.BackupListPoliciesParams, + ), + ), + cast_to=BackupListPoliciesResponse, + ) + + async def list_supported_policies( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListSupportedPoliciesResponse: + """ + To retrieve a list of all supported Droplet backup policies, send a GET request + to `/v2/droplets/backups/supported_policies`. + """ + return await self._get( + "/v2/droplets/backups/supported_policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/supported_policies", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupListSupportedPoliciesResponse, + ) + + async def retrieve_policy( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupRetrievePolicyResponse: + """ + To show information about an individual Droplet's backup policy, send a GET + request to `/v2/droplets/$DROPLET_ID/backups/policy`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/backups/policy" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupRetrievePolicyResponse, + ) + + +class BackupsResourceWithRawResponse: + def __init__(self, backups: BackupsResource) -> None: + self._backups = backups + + self.list = to_raw_response_wrapper( + backups.list, + ) + self.list_policies = to_raw_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = to_raw_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = to_raw_response_wrapper( + backups.retrieve_policy, + ) + + +class AsyncBackupsResourceWithRawResponse: + def __init__(self, backups: AsyncBackupsResource) -> None: + self._backups = backups + + self.list = async_to_raw_response_wrapper( + backups.list, + ) + self.list_policies = async_to_raw_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = async_to_raw_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = async_to_raw_response_wrapper( + backups.retrieve_policy, + ) + + +class BackupsResourceWithStreamingResponse: + def __init__(self, backups: BackupsResource) -> None: + self._backups = backups + + self.list = to_streamed_response_wrapper( + backups.list, + ) + self.list_policies = to_streamed_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = to_streamed_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = to_streamed_response_wrapper( + backups.retrieve_policy, + ) + + +class AsyncBackupsResourceWithStreamingResponse: + def __init__(self, backups: AsyncBackupsResource) -> None: + self._backups = backups + + self.list = async_to_streamed_response_wrapper( + backups.list, + ) + self.list_policies = async_to_streamed_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = async_to_streamed_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = async_to_streamed_response_wrapper( + backups.retrieve_policy, + ) diff --git a/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py new file mode 100644 index 00000000..46db6563 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py @@ -0,0 +1,624 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import destroy_with_associated_resource_delete_selective_params +from ...types.gpu_droplets.destroy_with_associated_resource_list_response import ( + DestroyWithAssociatedResourceListResponse, +) +from ...types.gpu_droplets.destroy_with_associated_resource_check_status_response import ( + DestroyWithAssociatedResourceCheckStatusResponse, +) + +__all__ = ["DestroyWithAssociatedResourcesResource", "AsyncDestroyWithAssociatedResourcesResource"] + + +class DestroyWithAssociatedResourcesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DestroyWithAssociatedResourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DestroyWithAssociatedResourcesResourceWithStreamingResponse(self) + + def list( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceListResponse: + """ + To list the associated billable resources that can be destroyed along with a + Droplet, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. + + This endpoint will only return resources that you are authorized to see. For + example, to see associated Reserved IPs, include the `reserved_ip:read` scope. + + The response will be a JSON object containing `snapshots`, `volumes`, and + `volume_snapshots` keys. Each will be set to an array of objects containing + information about the associated resources. 
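To make the preview step concrete, a minimal sketch of listing the billable resources that would be destroyed with a Droplet. Client and resource names are inferred from the file layout and may differ in the published SDK; the Droplet ID is a made-up example.

```python
# Hypothetical usage sketch; names inferred from the package layout.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# Preview what would be destroyed alongside the Droplet before committing.
resources = client.gpu_droplets.destroy_with_associated_resources.list(3164444)
print(resources)  # snapshots, volumes, and volume_snapshots arrays, per the docstring
```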
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceListResponse,
+        )
+
+    def check_status(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> DestroyWithAssociatedResourceCheckStatusResponse:
+        """
+        To check on the status of a request to destroy a Droplet with its associated
+        resources, send a GET request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
+        )
+
+    def delete_dangerous(
+        self,
+        droplet_id: int,
+        *,
+        x_dangerous: bool,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with all of its associated resources, send a DELETE
+        request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
+        The headers of this request must include an `X-Dangerous` key set to `true`. To
+        preview which resources will be destroyed, first query the Droplet's associated
+        resources. This operation _cannot_ be reversed and should be used with caution.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
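A minimal sketch of the dangerous delete defined above. The `x_dangerous=True` flag is what makes the method emit the required `X-Dangerous: true` header, as the code shows; client and resource names are inferred from the file layout.

```python
# Hypothetical usage sketch; names inferred from the package layout.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# Destroy the Droplet and everything attached to it. The method sets the
# required `X-Dangerous: true` header from the `x_dangerous` flag; this
# operation cannot be reversed, so preview the resource list first.
client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(
    3164444,  # example Droplet ID
    x_dangerous=True,
)
```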
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
+        return self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def delete_selective(
+        self,
+        droplet_id: int,
+        *,
+        floating_ips: List[str] | NotGiven = NOT_GIVEN,
+        reserved_ips: List[str] | NotGiven = NOT_GIVEN,
+        snapshots: List[str] | NotGiven = NOT_GIVEN,
+        volume_snapshots: List[str] | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with a subset of its associated resources, send a
+        DELETE request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint.
+        The JSON body of the request should include `reserved_ips`, `snapshots`,
+        `volumes`, or `volume_snapshots` keys, each set to an array of IDs for the
+        associated resources to be destroyed. The IDs can be found by querying the
+        Droplet's associated resources. Any associated resource not included in the
+        request will remain and continue to accrue charges on your account.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
+              deletion.
+
+          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
+              deletion.
+
+          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.
+
+          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
+              deletion.
+
+          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
+            body=maybe_transform(
+                {
+                    "floating_ips": floating_ips,
+                    "reserved_ips": reserved_ips,
+                    "snapshots": snapshots,
+                    "volume_snapshots": volume_snapshots,
+                    "volumes": volumes,
+                },
+                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def retry(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        If the status of a request to destroy a Droplet with its associated resources
+        reported any errors, it can be retried by sending a POST request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint.
+
+        Only one destroy can be active at a time per Droplet. If a retry is issued while
+        another destroy is in progress for the Droplet, a 409 status code will be
+        returned. A successful response will include a 202 response code and no content.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._post(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncDestroyWithAssociatedResourcesResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(self) + + async def list( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceListResponse: + """ + To list the associated billable resources that can be destroyed along with a + Droplet, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. + + This endpoint will only return resources that you are authorized to see. For + example, to see associated Reserved IPs, include the `reserved_ip:read` scope. + + The response will be a JSON object containing `snapshots`, `volumes`, and + `volume_snapshots` keys. Each will be set to an array of objects containing + information about the associated resources. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DestroyWithAssociatedResourceListResponse, + ) + + async def check_status( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceCheckStatusResponse: + """ + To check on the status of a request to destroy a Droplet with its associated + resources, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint. 
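A minimal sketch of checking on a destroy request. The response model's field names follow the public DigitalOcean API docs and are an assumption about this SDK's generated types; client and resource names are inferred from the file layout.

```python
# Hypothetical usage sketch; names inferred from the package layout.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# Check on an in-flight destroy-with-associated-resources request.
status = client.gpu_droplets.destroy_with_associated_resources.check_status(3164444)
# Inspect completion and failure information per the API docs
# (field names such as `completed_at` are assumptions here).
print(status)
```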
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
+        )
+
+    async def delete_dangerous(
+        self,
+        droplet_id: int,
+        *,
+        x_dangerous: bool,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with all of its associated resources, send a DELETE
+        request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
+        The headers of this request must include an `X-Dangerous` key set to `true`. To
+        preview which resources will be destroyed, first query the Droplet's associated
+        resources. This operation _cannot_ be reversed and should be used with caution.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")})
+        return await self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def delete_selective(
+        self,
+        droplet_id: int,
+        *,
+        floating_ips: List[str] | NotGiven = NOT_GIVEN,
+        reserved_ips: List[str] | NotGiven = NOT_GIVEN,
+        snapshots: List[str] | NotGiven = NOT_GIVEN,
+        volume_snapshots: List[str] | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with a subset of its associated resources, send a
+        DELETE request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint.
+        The JSON body of the request should include `reserved_ips`, `snapshots`,
+        `volumes`, or `volume_snapshots` keys, each set to an array of IDs for the
+        associated resources to be destroyed. The IDs can be found by querying the
+        Droplet's associated resources. Any associated resource not included in the
+        request will remain and continue to accrue charges on your account.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
+              deletion.
+
+          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
+              deletion.
+
+          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.
+
+          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
+              deletion.
+
+          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
+            body=await async_maybe_transform(
+                {
+                    "floating_ips": floating_ips,
+                    "reserved_ips": reserved_ips,
+                    "snapshots": snapshots,
+                    "volume_snapshots": volume_snapshots,
+                    "volumes": volumes,
+                },
+                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def retry(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        If the status of a request to destroy a Droplet with its associated resources
+        reported any errors, it can be retried by sending a POST request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint.
+
+        Only one destroy can be active at a time per Droplet. If a retry is issued while
+        another destroy is in progress for the Droplet, a 409 status code will be
+        returned. A successful response will include a 202 response code and no content.
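A minimal sketch of retrying a destroy whose status report listed failures, including handling the documented 409. `APIStatusError` is assumed to be the package's standard Stainless error export; names are otherwise inferred from the file layout.

```python
# Hypothetical usage sketch; names inferred from the package layout.
import asyncio

from gradientai import APIStatusError, AsyncGradientAI  # assumed exports


async def main() -> None:
    client = AsyncGradientAI()  # credentials assumed to come from the environment
    try:
        # Retry a destroy whose status report listed failed resources.
        await client.gpu_droplets.destroy_with_associated_resources.retry(3164444)
    except APIStatusError as err:
        # A 409 means another destroy is still in progress for this Droplet.
        print(err.status_code)


asyncio.run(main())
```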
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DestroyWithAssociatedResourcesResourceWithRawResponse: + def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = to_raw_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = to_raw_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = to_raw_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = to_raw_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = to_raw_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: + def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = async_to_raw_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = async_to_raw_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = async_to_raw_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = async_to_raw_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = async_to_raw_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class DestroyWithAssociatedResourcesResourceWithStreamingResponse: + def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = to_streamed_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = to_streamed_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = to_streamed_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = to_streamed_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = to_streamed_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: + def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = async_to_streamed_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = async_to_streamed_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = async_to_streamed_response_wrapper( 
+ destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = async_to_streamed_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = async_to_streamed_response_wrapper( + destroy_with_associated_resources.retry, + ) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/gradientai/resources/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..e9cb832f --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/__init__.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .tags import ( + TagsResource, + AsyncTagsResource, + TagsResourceWithRawResponse, + AsyncTagsResourceWithRawResponse, + TagsResourceWithStreamingResponse, + AsyncTagsResourceWithStreamingResponse, +) +from .rules import ( + RulesResource, + AsyncRulesResource, + RulesResourceWithRawResponse, + AsyncRulesResourceWithRawResponse, + RulesResourceWithStreamingResponse, + AsyncRulesResourceWithStreamingResponse, +) +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from .firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) + +__all__ = [ + "DropletsResource", + "AsyncDropletsResource", + "DropletsResourceWithRawResponse", + "AsyncDropletsResourceWithRawResponse", + "DropletsResourceWithStreamingResponse", + "AsyncDropletsResourceWithStreamingResponse", + "TagsResource", + "AsyncTagsResource", + "TagsResourceWithRawResponse", + "AsyncTagsResourceWithRawResponse", + "TagsResourceWithStreamingResponse", + "AsyncTagsResourceWithStreamingResponse", + "RulesResource", + "AsyncRulesResource", + "RulesResourceWithRawResponse", + "AsyncRulesResourceWithRawResponse", + "RulesResourceWithStreamingResponse", + "AsyncRulesResourceWithStreamingResponse", + "FirewallsResource", + "AsyncFirewallsResource", + "FirewallsResourceWithRawResponse", + "AsyncFirewallsResourceWithRawResponse", + "FirewallsResourceWithStreamingResponse", + "AsyncFirewallsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/gradientai/resources/gpu_droplets/firewalls/droplets.py new file mode 100644 index 00000000..025d1ba4 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/droplets.py @@ -0,0 +1,296 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.firewalls import droplet_add_params, droplet_remove_params + +__all__ = ["DropletsResource", "AsyncDropletsResource"] + + +class DropletsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DropletsResourceWithStreamingResponse(self) + + def add( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. 
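As shown in the sketch below, assigning Droplets to a firewall is a single call carrying the `droplet_ids` list; client and resource names are inferred from the file layout, and the firewall/Droplet IDs are made-up examples.

```python
# Hypothetical usage sketch; names inferred from the package layout.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# Assign two Droplets to a firewall; a 204 with no body signals success.
client.gpu_droplets.firewalls.droplets.add(
    "bb4b2611-3d72-467b-8602-280330ecd65c",  # example firewall ID
    droplet_ids=[49696269, 49696270],
)
```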
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDropletsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDropletsResourceWithStreamingResponse(self) + + async def add( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DropletsResourceWithRawResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_raw_response_wrapper( + droplets.add, + ) + self.remove = to_raw_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithRawResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_raw_response_wrapper( + droplets.add, + ) + self.remove = async_to_raw_response_wrapper( + droplets.remove, + ) + + +class DropletsResourceWithStreamingResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_streamed_response_wrapper( + droplets.add, + ) + self.remove = to_streamed_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithStreamingResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_streamed_response_wrapper( + droplets.add, + ) + self.remove = async_to_streamed_response_wrapper( + droplets.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py new file mode 100644 index 00000000..a6c21928 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py @@ -0,0 +1,647 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from .tags import ( + TagsResource, + AsyncTagsResource, + TagsResourceWithRawResponse, + AsyncTagsResourceWithRawResponse, + TagsResourceWithStreamingResponse, + AsyncTagsResourceWithStreamingResponse, +) +from .rules import ( + RulesResource, + AsyncRulesResource, + RulesResourceWithRawResponse, + AsyncRulesResourceWithRawResponse, + RulesResourceWithStreamingResponse, + AsyncRulesResourceWithStreamingResponse, +) +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import firewall_list_params, firewall_create_params, firewall_update_params +from ....types.gpu_droplets.firewall_param import FirewallParam +from ....types.gpu_droplets.firewall_list_response import FirewallListResponse +from ....types.gpu_droplets.firewall_create_response import FirewallCreateResponse +from ....types.gpu_droplets.firewall_update_response import FirewallUpdateResponse +from ....types.gpu_droplets.firewall_retrieve_response import FirewallRetrieveResponse + +__all__ = ["FirewallsResource", "AsyncFirewallsResource"] + + +class FirewallsResource(SyncAPIResource): + @cached_property + def droplets(self) -> DropletsResource: + return DropletsResource(self._client) + + @cached_property + def tags(self) -> TagsResource: + return TagsResource(self._client) + + @cached_property + def rules(self) -> RulesResource: + return RulesResource(self._client) + + @cached_property + def with_raw_response(self) -> FirewallsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FirewallsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FirewallsResourceWithStreamingResponse(self) + + def create( + self, + *, + body: firewall_create_params.Body | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallCreateResponse: + """To create a new firewall, send a POST request to `/v2/firewalls`. + + The request + must contain at least one inbound or outbound access rule. 
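A minimal sketch of creating a firewall with one inbound rule. The payload shape (name, `inbound_rules`, `sources.addresses`) follows the public DigitalOcean firewall API and is an assumption about this SDK's `body` parameter; client and resource names are inferred from the file layout.

```python
# Hypothetical usage sketch; names and payload shape are assumptions.
from gradientai import GradientAI  # assumed export

client = GradientAI()  # credentials assumed to come from the environment

# A firewall must be created with at least one inbound or outbound rule.
firewall = client.gpu_droplets.firewalls.create(
    body={
        "name": "example-firewall",
        "inbound_rules": [
            {
                "protocol": "tcp",
                "ports": "22",
                "sources": {"addresses": ["0.0.0.0/0", "::/0"]},
            }
        ],
    }
)
print(firewall)
```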
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + body=maybe_transform(body, firewall_create_params.FirewallCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallCreateResponse, + ) + + def retrieve( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallRetrieveResponse: + """ + To show information about an existing firewall, send a GET request to + `/v2/firewalls/$FIREWALL_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return self._get( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallRetrieveResponse, + ) + + def update( + self, + firewall_id: str, + *, + firewall: FirewallParam, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallUpdateResponse: + """ + To update the configuration of an existing firewall, send a PUT request to + `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation + of the firewall including existing attributes. **Note that any attributes that + are not provided will be reset to their default values.** + + You must have read access (e.g. `droplet:read`) to all resources attached to the + firewall to successfully update the firewall. 
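+
+        As an illustrative sketch (assuming a configured client; because omitted
+        attributes are reset, `firewall` should carry the full desired
+        configuration, and the ID and rule shape here are placeholders):
+
+            client.gpu_droplets.firewalls.update(
+                "example-firewall-id",
+                firewall={
+                    "name": "example-firewall",
+                    "inbound_rules": [
+                        {
+                            "protocol": "tcp",
+                            "ports": "22",
+                            "sources": {"addresses": ["192.0.2.0/24"]},
+                        }
+                    ],
+                },
+            )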
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return self._put( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + body=maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallListResponse: + """ + To list all of the firewalls available on your account, send a GET request to + `/v2/firewalls`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + firewall_list_params.FirewallListParams, + ), + ), + cast_to=FirewallListResponse, + ) + + def delete( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
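+
+        For example, a minimal sketch (assuming a configured client; the ID is a
+        placeholder). The method returns `None` on the expected 204:
+
+            client.gpu_droplets.firewalls.delete("example-firewall-id")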
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncFirewallsResource(AsyncAPIResource): + @cached_property + def droplets(self) -> AsyncDropletsResource: + return AsyncDropletsResource(self._client) + + @cached_property + def tags(self) -> AsyncTagsResource: + return AsyncTagsResource(self._client) + + @cached_property + def rules(self) -> AsyncRulesResource: + return AsyncRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFirewallsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFirewallsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFirewallsResourceWithStreamingResponse(self) + + async def create( + self, + *, + body: firewall_create_params.Body | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallCreateResponse: + """To create a new firewall, send a POST request to `/v2/firewalls`. + + The request + must contain at least one inbound or outbound access rule. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + body=await async_maybe_transform(body, firewall_create_params.FirewallCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallCreateResponse, + ) + + async def retrieve( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallRetrieveResponse: + """ + To show information about an existing firewall, send a GET request to + `/v2/firewalls/$FIREWALL_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return await self._get( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallRetrieveResponse, + ) + + async def update( + self, + firewall_id: str, + *, + firewall: FirewallParam, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallUpdateResponse: + """ + To update the configuration of an existing firewall, send a PUT request to + `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation + of the firewall including existing attributes. **Note that any attributes that + are not provided will be reset to their default values.** + + You must have read access (e.g. `droplet:read`) to all resources attached to the + firewall to successfully update the firewall. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return await self._put( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + body=await async_maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallListResponse: + """ + To list all of the firewalls available on your account, send a GET request to + `/v2/firewalls`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + firewall_list_params.FirewallListParams, + ), + ), + cast_to=FirewallListResponse, + ) + + async def delete( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class FirewallsResourceWithRawResponse: + def __init__(self, firewalls: FirewallsResource) -> None: + self._firewalls = firewalls + + self.create = to_raw_response_wrapper( + firewalls.create, + ) + self.retrieve = to_raw_response_wrapper( + firewalls.retrieve, + ) + self.update = to_raw_response_wrapper( + firewalls.update, + ) + self.list = to_raw_response_wrapper( + firewalls.list, + ) + self.delete = to_raw_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithRawResponse: + return DropletsResourceWithRawResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> TagsResourceWithRawResponse: + return TagsResourceWithRawResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> RulesResourceWithRawResponse: + return RulesResourceWithRawResponse(self._firewalls.rules) + + +class AsyncFirewallsResourceWithRawResponse: + def __init__(self, firewalls: AsyncFirewallsResource) -> None: + self._firewalls = firewalls + + self.create = async_to_raw_response_wrapper( + firewalls.create, + ) + self.retrieve = async_to_raw_response_wrapper( + firewalls.retrieve, + ) + self.update = async_to_raw_response_wrapper( + firewalls.update, + ) + self.list = async_to_raw_response_wrapper( + 
firewalls.list, + ) + self.delete = async_to_raw_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithRawResponse: + return AsyncDropletsResourceWithRawResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> AsyncTagsResourceWithRawResponse: + return AsyncTagsResourceWithRawResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> AsyncRulesResourceWithRawResponse: + return AsyncRulesResourceWithRawResponse(self._firewalls.rules) + + +class FirewallsResourceWithStreamingResponse: + def __init__(self, firewalls: FirewallsResource) -> None: + self._firewalls = firewalls + + self.create = to_streamed_response_wrapper( + firewalls.create, + ) + self.retrieve = to_streamed_response_wrapper( + firewalls.retrieve, + ) + self.update = to_streamed_response_wrapper( + firewalls.update, + ) + self.list = to_streamed_response_wrapper( + firewalls.list, + ) + self.delete = to_streamed_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithStreamingResponse: + return DropletsResourceWithStreamingResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> TagsResourceWithStreamingResponse: + return TagsResourceWithStreamingResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> RulesResourceWithStreamingResponse: + return RulesResourceWithStreamingResponse(self._firewalls.rules) + + +class AsyncFirewallsResourceWithStreamingResponse: + def __init__(self, firewalls: AsyncFirewallsResource) -> None: + self._firewalls = firewalls + + self.create = async_to_streamed_response_wrapper( + firewalls.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + firewalls.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + firewalls.update, + ) + self.list = async_to_streamed_response_wrapper( + firewalls.list, + ) + self.delete = async_to_streamed_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: + return AsyncDropletsResourceWithStreamingResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> AsyncTagsResourceWithStreamingResponse: + return AsyncTagsResourceWithStreamingResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> AsyncRulesResourceWithStreamingResponse: + return AsyncRulesResourceWithStreamingResponse(self._firewalls.rules) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/rules.py b/src/gradientai/resources/gpu_droplets/firewalls/rules.py new file mode 100644 index 00000000..61026779 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/rules.py @@ -0,0 +1,320 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
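+
+# Usage sketch (illustrative only; assumes a configured `GradientAI` client and
+# the `client.gpu_droplets.firewalls.rules` accessor implied by this module's
+# path; the rule shape is an example, not the full schema):
+#
+#     client.gpu_droplets.firewalls.rules.remove(
+#         "example-firewall-id",  # placeholder ID
+#         inbound_rules=[
+#             {"protocol": "tcp", "ports": "22", "sources": {"addresses": ["192.0.2.0/24"]}}
+#         ],
+#     )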
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.firewalls import rule_add_params, rule_remove_params
+
+__all__ = ["RulesResource", "AsyncRulesResource"]
+
+
+class RulesResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> RulesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return RulesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RulesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return RulesResourceWithStreamingResponse(self)
+
+    def add(
+        self,
+        firewall_id: str,
+        *,
+        inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN,
+        outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To add additional access rules to a firewall, send a POST request to
+        `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+        `inbound_rules` and/or `outbound_rules` attribute containing an array of rules
+        to be added.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
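+
+        For example, an illustrative sketch (assuming a configured client; the ID
+        and rule shape are placeholders, not the full schema):
+
+            client.gpu_droplets.firewalls.rules.add(
+                "example-firewall-id",
+                inbound_rules=[
+                    {
+                        "protocol": "tcp",
+                        "ports": "3306",
+                        "sources": {"addresses": ["192.0.2.0/24"]},
+                    }
+                ],
+            )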
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_add_params.RuleAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, + outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove access rules from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an + `inbound_rules` and/or `outbound_rules` attribute containing an array of rules + to be removed. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_remove_params.RuleRemoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncRulesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRulesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRulesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncRulesResourceWithStreamingResponse(self)
+
+    async def add(
+        self,
+        firewall_id: str,
+        *,
+        inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN,
+        outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To add additional access rules to a firewall, send a POST request to
+        `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+        `inbound_rules` and/or `outbound_rules` attribute containing an array of rules
+        to be added.
+
+        No response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not firewall_id:
+            raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._post(
+            f"/v2/firewalls/{firewall_id}/rules"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules",
+            body=await async_maybe_transform(
+                {
+                    "inbound_rules": inbound_rules,
+                    "outbound_rules": outbound_rules,
+                },
+                rule_add_params.RuleAddParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def remove(
+        self,
+        firewall_id: str,
+        *,
+        inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN,
+        outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To remove access rules from a firewall, send a DELETE request to
+        `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an
+        `inbound_rules` and/or `outbound_rules` attribute containing an array of rules
+        to be removed.
+ + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=await async_maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_remove_params.RuleRemoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class RulesResourceWithRawResponse: + def __init__(self, rules: RulesResource) -> None: + self._rules = rules + + self.add = to_raw_response_wrapper( + rules.add, + ) + self.remove = to_raw_response_wrapper( + rules.remove, + ) + + +class AsyncRulesResourceWithRawResponse: + def __init__(self, rules: AsyncRulesResource) -> None: + self._rules = rules + + self.add = async_to_raw_response_wrapper( + rules.add, + ) + self.remove = async_to_raw_response_wrapper( + rules.remove, + ) + + +class RulesResourceWithStreamingResponse: + def __init__(self, rules: RulesResource) -> None: + self._rules = rules + + self.add = to_streamed_response_wrapper( + rules.add, + ) + self.remove = to_streamed_response_wrapper( + rules.remove, + ) + + +class AsyncRulesResourceWithStreamingResponse: + def __init__(self, rules: AsyncRulesResource) -> None: + self._rules = rules + + self.add = async_to_streamed_response_wrapper( + rules.add, + ) + self.remove = async_to_streamed_response_wrapper( + rules.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/tags.py b/src/gradientai/resources/gpu_droplets/firewalls/tags.py new file mode 100644 index 00000000..725bc014 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/tags.py @@ -0,0 +1,308 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.firewalls import tag_add_params, tag_remove_params + +__all__ = ["TagsResource", "AsyncTagsResource"] + + +class TagsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> TagsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return TagsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> TagsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return TagsResourceWithStreamingResponse(self) + + def add( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a tag representing a group of Droplets to a firewall, send a POST + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=maybe_transform({"tags": tags}, tag_add_params.TagAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a tag representing a group of Droplets from a firewall, send a DELETE + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. 
+ + Requires `tag:create` and `tag:read` scopes. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncTagsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncTagsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncTagsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncTagsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncTagsResourceWithStreamingResponse(self) + + async def add( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a tag representing a group of Droplets to a firewall, send a POST + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=await async_maybe_transform({"tags": tags}, tag_add_params.TagAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a tag representing a group of Droplets from a firewall, send a DELETE + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=await async_maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class TagsResourceWithRawResponse: + def __init__(self, tags: TagsResource) -> None: + self._tags = tags + + self.add = to_raw_response_wrapper( + tags.add, + ) + self.remove = to_raw_response_wrapper( + tags.remove, + ) + + +class AsyncTagsResourceWithRawResponse: + def __init__(self, tags: AsyncTagsResource) -> None: + self._tags = tags + + self.add = async_to_raw_response_wrapper( + tags.add, + ) + self.remove = async_to_raw_response_wrapper( + tags.remove, + ) + + +class TagsResourceWithStreamingResponse: + def __init__(self, tags: TagsResource) -> None: + self._tags = tags + + self.add = to_streamed_response_wrapper( + tags.add, + ) + self.remove = to_streamed_response_wrapper( + tags.remove, + ) + + +class AsyncTagsResourceWithStreamingResponse: + def __init__(self, tags: AsyncTagsResource) -> None: + self._tags = tags + + self.add = async_to_streamed_response_wrapper( + tags.add, + ) + self.remove = async_to_streamed_response_wrapper( + tags.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..bf6871b1 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "FloatingIPsResource", + "AsyncFloatingIPsResource", + "FloatingIPsResourceWithRawResponse", + "AsyncFloatingIPsResourceWithRawResponse", + "FloatingIPsResourceWithStreamingResponse", + "AsyncFloatingIPsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/gradientai/resources/gpu_droplets/floating_ips/actions.py new file mode 100644 index 00000000..7ba3899d --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/actions.py @@ -0,0 +1,489 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from __future__ import annotations
+
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.floating_ips import action_create_params
+from ....types.gpu_droplets.floating_ips.action_list_response import ActionListResponse
+from ....types.gpu_droplets.floating_ips.action_create_response import ActionCreateResponse
+from ....types.gpu_droplets.floating_ips.action_retrieve_response import ActionRetrieveResponse
+
+__all__ = ["ActionsResource", "AsyncActionsResource"]
+
+
+class ActionsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ActionsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return ActionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ActionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return ActionsResourceWithStreamingResponse(self)
+
+    @overload
+    def create(
+        self,
+        floating_ip: str,
+        *,
+        type: Literal["assign", "unassign"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionCreateResponse:
+        """
+        To initiate an action on a floating IP, send a POST request to
+        `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body of the request, set
+        the `type` attribute to one of the supported action types:
+
+        | Action     | Details                                |
+        | ---------- | -------------------------------------- |
+        | `assign`   | Assigns a floating IP to a Droplet     |
+        | `unassign` | Unassigns a floating IP from a Droplet |
+
+        Args:
+          type: The type of action to initiate for the floating IP.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        floating_ip: str,
+        *,
+        droplet_id: int,
+        type: Literal["assign", "unassign"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionCreateResponse:
+        """
+        To initiate an action on a floating IP, send a POST request to
+        `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body of the request, set
+        the `type` attribute to one of the supported action types:
+
+        | Action     | Details                                |
+        | ---------- | -------------------------------------- |
+        | `assign`   | Assigns a floating IP to a Droplet     |
+        | `unassign` | Unassigns a floating IP from a Droplet |
+
+        Args:
+          droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+          type: The type of action to initiate for the floating IP.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["type"], ["droplet_id", "type"])
+    def create(
+        self,
+        floating_ip: str,
+        *,
+        type: Literal["assign", "unassign"],
+        droplet_id: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionCreateResponse:
+        if not floating_ip:
+            raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}")
+        return self._post(
+            f"/v2/floating_ips/{floating_ip}/actions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions",
+            body=maybe_transform(
+                {
+                    "type": type,
+                    "droplet_id": droplet_id,
+                },
+                action_create_params.ActionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ActionCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        action_id: int,
+        *,
+        floating_ip: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionRetrieveResponse:
+        """
+        To retrieve the status of a floating IP action, send a GET request to
+        `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`.
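+
+        For example, a minimal sketch (assuming a configured client; the IDs are
+        placeholders):
+
+            action = client.gpu_droplets.floating_ips.actions.retrieve(
+                123456,
+                floating_ip="192.0.2.1",
+            )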
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a floating IP, send a GET + request to `/v2/floating_ips/$FLOATING_IP/actions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + floating_ip: str, + *, + type: Literal["assign", "unassign"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionCreateResponse:
+        """
+        To initiate an action on a floating IP, send a POST request to
+        `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body of the request, set
+        the `type` attribute to one of the supported action types:
+
+        | Action     | Details                                |
+        | ---------- | -------------------------------------- |
+        | `assign`   | Assigns a floating IP to a Droplet     |
+        | `unassign` | Unassigns a floating IP from a Droplet |
+
+        Args:
+          type: The type of action to initiate for the floating IP.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        floating_ip: str,
+        *,
+        droplet_id: int,
+        type: Literal["assign", "unassign"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ActionCreateResponse:
+        """
+        To initiate an action on a floating IP, send a POST request to
+        `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body of the request, set
+        the `type` attribute to one of the supported action types:
+
+        | Action     | Details                                |
+        | ---------- | -------------------------------------- |
+        | `assign`   | Assigns a floating IP to a Droplet     |
+        | `unassign` | Unassigns a floating IP from a Droplet |
+
+        Args:
+          droplet_id: The ID of the Droplet that the floating IP will be assigned to.
+
+          type: The type of action to initiate for the floating IP.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["type"], ["droplet_id", "type"])
+    async def create(
+        self,
+        floating_ip: str,
+        *,
+        type: Literal["assign", "unassign"],
+        droplet_id: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._post( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + body=await async_maybe_transform( + { + "type": type, + "droplet_id": droplet_id, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionCreateResponse, + ) + + async def retrieve( + self, + action_id: int, + *, + floating_ip: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a floating IP action, send a GET request to + `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a floating IP, send a GET + request to `/v2/floating_ips/$FLOATING_IP/actions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_raw_response_wrapper( + actions.create, + ) + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_raw_response_wrapper( + actions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py new file mode 100644 index 00000000..cabe012e --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py @@ -0,0 +1,635 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
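+
+# Usage sketch (illustrative only, not generated code): creating and releasing a
+# floating IP through this resource. The client class name and accessor path are
+# assumptions inferred from this package layout.
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     created = client.gpu_droplets.floating_ips.create(droplet_id=1234567)
+#     client.gpu_droplets.floating_ips.delete("192.0.2.10")  # hypothetical address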
+ +from __future__ import annotations + +from typing_extensions import overload + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import floating_ip_list_params, floating_ip_create_params +from ....types.gpu_droplets.floating_ip_list_response import FloatingIPListResponse +from ....types.gpu_droplets.floating_ip_create_response import FloatingIPCreateResponse +from ....types.gpu_droplets.floating_ip_retrieve_response import FloatingIPRetrieveResponse + +__all__ = ["FloatingIPsResource", "AsyncFloatingIPsResource"] + + +class FloatingIPsResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> FloatingIPsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FloatingIPsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FloatingIPsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FloatingIPsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
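+
+    # Per the `@required_args` groups below, a `create` call must supply either
+    # `droplet_id` (assign the new floating IP to a Droplet) or `region` (reserve
+    # it to a region, optionally scoped to a project). A hedged sketch of the
+    # region-reserved form, assuming a configured `client`:
+    #
+    #     ip = client.gpu_droplets.floating_ips.create(region="nyc3")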
+ + @overload + def create( + self, + *, + region: str, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + region: The slug identifier for the region the floating IP will be reserved to. + + project_id: The UUID of the project to which the floating IP will be assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id"], ["region"]) + def create( + self, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + return self._post( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + body=maybe_transform( + { + "droplet_id": droplet_id, + "region": region, + "project_id": project_id, + }, + floating_ip_create_params.FloatingIPCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPCreateResponse, + ) + + def retrieve( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPRetrieveResponse: + """ + To show information about a floating IP, send a GET request to + `/v2/floating_ips/$FLOATING_IP_ADDR`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPRetrieveResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPListResponse: + """ + To list all of the floating IPs available on your account, send a GET request to + `/v2/floating_ips`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + floating_ip_list_params.FloatingIPListParams, + ), + ), + cast_to=FloatingIPListResponse, + ) + + def delete( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a floating IP and remove it from your account, send a DELETE request + to `/v2/floating_ips/$FLOATING_IP_ADDR`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncFloatingIPsResource(AsyncAPIResource): + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFloatingIPsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFloatingIPsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFloatingIPsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFloatingIPsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + region: str, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + region: The slug identifier for the region the floating IP will be reserved to. + + project_id: The UUID of the project to which the floating IP will be assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id"], ["region"]) + async def create( + self, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + return await self._post( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "region": region, + "project_id": project_id, + }, + floating_ip_create_params.FloatingIPCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPCreateResponse, + ) + + async def retrieve( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPRetrieveResponse: + """ + To show information about a floating IP, send a GET request to + `/v2/floating_ips/$FLOATING_IP_ADDR`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPRetrieveResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPListResponse: + """ + To list all of the floating IPs available on your account, send a GET request to + `/v2/floating_ips`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + floating_ip_list_params.FloatingIPListParams, + ), + ), + cast_to=FloatingIPListResponse, + ) + + async def delete( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a floating IP and remove it from your account, send a DELETE request + to `/v2/floating_ips/$FLOATING_IP_ADDR`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class FloatingIPsResourceWithRawResponse: + def __init__(self, floating_ips: FloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = to_raw_response_wrapper( + floating_ips.create, + ) + self.retrieve = to_raw_response_wrapper( + floating_ips.retrieve, + ) + self.list = to_raw_response_wrapper( + floating_ips.list, + ) + self.delete = to_raw_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._floating_ips.actions) + + +class AsyncFloatingIPsResourceWithRawResponse: + def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = async_to_raw_response_wrapper( + floating_ips.create, + ) + self.retrieve = async_to_raw_response_wrapper( + floating_ips.retrieve, + ) + self.list = async_to_raw_response_wrapper( + floating_ips.list, + ) + self.delete = async_to_raw_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._floating_ips.actions) + + +class FloatingIPsResourceWithStreamingResponse: + def __init__(self, floating_ips: FloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = to_streamed_response_wrapper( + floating_ips.create, + ) + self.retrieve = to_streamed_response_wrapper( + floating_ips.retrieve, + ) + self.list = to_streamed_response_wrapper( + floating_ips.list, + ) + self.delete = to_streamed_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._floating_ips.actions) + + +class AsyncFloatingIPsResourceWithStreamingResponse: + def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = async_to_streamed_response_wrapper( + floating_ips.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + floating_ips.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + floating_ips.list, + ) + self.delete = async_to_streamed_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._floating_ips.actions) diff --git a/src/gradientai/resources/gpu_droplets/gpu_droplets.py b/src/gradientai/resources/gpu_droplets/gpu_droplets.py new file mode 100644 index 00000000..cbb07830 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/gpu_droplets.py @@ -0,0 +1,2008 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
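+
+# Usage sketch (illustrative only, not generated code): creating a Droplet and
+# filtering the listing down to GPU Droplets. The client class name and the
+# size/image slugs are assumptions; `type="gpus"` mirrors the documented query
+# parameter.
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     droplet = client.gpu_droplets.create(
+#         name="example.com",
+#         region="nyc3",
+#         size="s-1vcpu-1gb",  # hypothetical size slug
+#         image="ubuntu-20-04-x64",  # hypothetical image slug
+#     )
+#     gpus = client.gpu_droplets.list(type="gpus")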
+ +from __future__ import annotations + +from typing import Any, List, Union, Optional, cast +from typing_extensions import Literal, overload + +import httpx + +from .sizes import ( + SizesResource, + AsyncSizesResource, + SizesResourceWithRawResponse, + AsyncSizesResourceWithRawResponse, + SizesResourceWithStreamingResponse, + AsyncSizesResourceWithStreamingResponse, +) +from ...types import ( + gpu_droplet_list_params, + gpu_droplet_create_params, + gpu_droplet_list_kernels_params, + gpu_droplet_delete_by_tag_params, + gpu_droplet_list_firewalls_params, + gpu_droplet_list_snapshots_params, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .backups import ( + BackupsResource, + AsyncBackupsResource, + BackupsResourceWithRawResponse, + AsyncBackupsResourceWithRawResponse, + BackupsResourceWithStreamingResponse, + AsyncBackupsResourceWithStreamingResponse, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from .autoscale import ( + AutoscaleResource, + AsyncAutoscaleResource, + AutoscaleResourceWithRawResponse, + AsyncAutoscaleResourceWithRawResponse, + AutoscaleResourceWithStreamingResponse, + AsyncAutoscaleResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .images.images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from .account.account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) +from .volumes.volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .firewalls.firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) +from .floating_ips.floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) +from .load_balancers.load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from ...types.gpu_droplet_list_response 
import GPUDropletListResponse +from .destroy_with_associated_resources import ( + DestroyWithAssociatedResourcesResource, + AsyncDestroyWithAssociatedResourcesResource, + DestroyWithAssociatedResourcesResourceWithRawResponse, + AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, + DestroyWithAssociatedResourcesResourceWithStreamingResponse, + AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, +) +from ...types.droplet_backup_policy_param import DropletBackupPolicyParam +from ...types.gpu_droplet_create_response import GPUDropletCreateResponse +from ...types.gpu_droplet_retrieve_response import GPUDropletRetrieveResponse +from ...types.gpu_droplet_list_kernels_response import GPUDropletListKernelsResponse +from ...types.gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse +from ...types.gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse +from ...types.gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse + +__all__ = ["GPUDropletsResource", "AsyncGPUDropletsResource"] + + +class GPUDropletsResource(SyncAPIResource): + @cached_property + def backups(self) -> BackupsResource: + return BackupsResource(self._client) + + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResource: + return DestroyWithAssociatedResourcesResource(self._client) + + @cached_property + def autoscale(self) -> AutoscaleResource: + return AutoscaleResource(self._client) + + @cached_property + def firewalls(self) -> FirewallsResource: + return FirewallsResource(self._client) + + @cached_property + def floating_ips(self) -> FloatingIPsResource: + return FloatingIPsResource(self._client) + + @cached_property + def images(self) -> ImagesResource: + return ImagesResource(self._client) + + @cached_property + def load_balancers(self) -> LoadBalancersResource: + return LoadBalancersResource(self._client) + + @cached_property + def sizes(self) -> SizesResource: + return SizesResource(self._client) + + @cached_property + def snapshots(self) -> SnapshotsResource: + return SnapshotsResource(self._client) + + @cached_property + def volumes(self) -> VolumesResource: + return VolumesResource(self._client) + + @cached_property + def account(self) -> AccountResource: + return AccountResource(self._client) + + @cached_property + def with_raw_response(self) -> GPUDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return GPUDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GPUDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return GPUDropletsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + image: Union[str, int], + name: str, + size: str, + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backups: bool | NotGiven = NOT_GIVEN, + ipv6: bool | NotGiven = NOT_GIVEN, + monitoring: bool | NotGiven = NOT_GIVEN, + private_networking: bool | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + user_data: str | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + with_droplet_agent: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + """ + To create a new Droplet, send a POST request to `/v2/droplets` setting the + required attributes. + + A Droplet will be created using the provided information. The response body will + contain a JSON object with a key called `droplet`. The value will be an object + containing the standard attributes for your new Droplet. The response code, 202 + Accepted, does not indicate the success or failure of the operation, just that + the request has been accepted for processing. The `actions` returned as part of + the response's `links` object can be used to check the status of the Droplet + create event. + + ### Create Multiple Droplets + + Creating multiple Droplets is very similar to creating a single Droplet. Instead + of sending `name` as a string, send `names` as an array of strings. A Droplet + will be created for each name you send using the associated information. Up to + ten Droplets may be created this way at a time. + + Rather than returning a single Droplet, the response body will contain a JSON + array with a key called `droplets`. This will be set to an array of JSON + objects, each of which will contain the standard Droplet attributes. The + response code, 202 Accepted, does not indicate the success or failure of any + operation, just that the request has been accepted for processing. The array of + `actions` returned as part of the response's `links` object can be used to check + the status of each individual Droplet create event. + + Args: + image: The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + + name: The human-readable string you wish to use when displaying the Droplet name. The + name, if set to a domain name managed in the DigitalOcean DNS management system, + will configure a PTR record for the Droplet. The name set during creation will + also determine the hostname for the Droplet in its internal configuration. + + size: The slug identifier for the size that you wish to select for this Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` + is `true`, the backup plan will default to daily. 
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`, `nyc2`,
+              or `nyc3`). If the region is omitted from the create request completely, the
+              Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        *,
+        image: Union[str, int],
+        names: List[str],
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        array with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          names: An array of human-readable strings you wish to use when displaying the Droplet
+              name. Each name, if set to a domain name managed in the DigitalOcean DNS
+              management system, will configure a PTR record for the Droplet. Each name set
+              during creation will also determine the hostname for the Droplet in its internal
+              configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`, `nyc2`,
+              or `nyc3`). If the region is omitted from the create request completely, the
+              Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["image", "name", "size"], ["image", "names", "size"])
+    def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str | NotGiven = NOT_GIVEN,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        names: List[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + return cast( + GPUDropletCreateResponse, + self._post( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + body=maybe_transform( + { + "image": image, + "name": name, + "size": size, + "backup_policy": backup_policy, + "backups": backups, + "ipv6": ipv6, + "monitoring": monitoring, + "private_networking": private_networking, + "region": region, + "ssh_keys": ssh_keys, + "tags": tags, + "user_data": user_data, + "volumes": volumes, + "vpc_uuid": vpc_uuid, + "with_droplet_agent": with_droplet_agent, + "names": names, + }, + gpu_droplet_create_params.GPUDropletCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, GPUDropletCreateResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + def retrieve( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletRetrieveResponse: + """ + To show information about an individual Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletRetrieveResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListResponse: + """ + To list all Droplets in your account, send a GET request to `/v2/droplets`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing objects each representing a Droplet. These will + contain the standard Droplet attributes. + + ### Filtering Results by Tag + + It's possible to request filtered results by including certain query parameters. + To only list Droplets assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. 
For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + ### GPU Droplets + + By default, only non-GPU Droplets are returned. To list only GPU Droplets, set + the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`. + + Args: + name: Used to filter list response by Droplet name returning only exact matches. It is + case-insensitive and can not be combined with `tag_name`. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, + only non-GPU Droplets are returned. Can not be combined with `tag_name`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + "tag_name": tag_name, + "type": type, + }, + gpu_droplet_list_params.GPUDropletListParams, + ), + ), + cast_to=GPUDropletListResponse, + ) + + def delete( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_by_tag( + self, + *, + tag_name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete **all** Droplets assigned to a specific tag, include the `tag_name` + query parameter set to the name of the tag in your DELETE request. For example, + `/v2/droplets?tag_name=$TAG_NAME`. 
+
+        This endpoint requires `tag:read` scope.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          tag_name: Specifies Droplets to be deleted by tag.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+    def list_firewalls(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListFirewallsResponse:
+        """
+        To retrieve a list of all firewalls available to a Droplet, send a GET request
+        to `/v2/droplets/$DROPLET_ID/firewalls`.
+
+        The response will be a JSON object that has a key called `firewalls`. This will
+        be set to an array of `firewall` objects, each of which contains the standard
+        `firewall` attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/firewalls"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams,
+                ),
+            ),
+            cast_to=GPUDropletListFirewallsResponse,
+        )
+
+    def list_kernels(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListKernelsResponse:
+        """
+        To retrieve a list of all kernels available to a Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/kernels`.
+
+        The response will be a JSON object that has a key called `kernels`. This will be
+        set to an array of `kernel` objects, each of which contains the standard
+        `kernel` attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/kernels"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_kernels_params.GPUDropletListKernelsParams,
+                ),
+            ),
+            cast_to=GPUDropletListKernelsResponse,
+        )
+
+    def list_neighbors(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListNeighborsResponse:
+        """To retrieve a list of any "neighbors" (i.e.
+
+        Droplets that are co-located on the
+        same physical hardware) for a specific Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/neighbors`.
+
+        The results will be returned as a JSON object with a key of `droplets`. This
+        will be set to an array containing objects representing any other Droplets that
+        share the same physical hardware. An empty array indicates that the Droplet is
+        not co-located with any other Droplets associated with your account.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/neighbors"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=GPUDropletListNeighborsResponse,
+        )
+
+    def list_snapshots(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListSnapshotsResponse:
+        """
+        To retrieve the snapshots that have been created from a Droplet, send a GET
+        request to `/v2/droplets/$DROPLET_ID/snapshots`.
+
+        You will get back a JSON object that has a `snapshots` key. This will be set to
+        an array of snapshot objects, each of which contains the standard Droplet
+        snapshot attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams, + ), + ), + cast_to=GPUDropletListSnapshotsResponse, + ) + + +class AsyncGPUDropletsResource(AsyncAPIResource): + @cached_property + def backups(self) -> AsyncBackupsResource: + return AsyncBackupsResource(self._client) + + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResource: + return AsyncDestroyWithAssociatedResourcesResource(self._client) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResource: + return AsyncAutoscaleResource(self._client) + + @cached_property + def firewalls(self) -> AsyncFirewallsResource: + return AsyncFirewallsResource(self._client) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResource: + return AsyncFloatingIPsResource(self._client) + + @cached_property + def images(self) -> AsyncImagesResource: + return AsyncImagesResource(self._client) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResource: + return AsyncLoadBalancersResource(self._client) + + @cached_property + def sizes(self) -> AsyncSizesResource: + return AsyncSizesResource(self._client) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResource: + return AsyncSnapshotsResource(self._client) + + @cached_property + def volumes(self) -> AsyncVolumesResource: + return AsyncVolumesResource(self._client) + + @cached_property + def account(self) -> AsyncAccountResource: + return AsyncAccountResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncGPUDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncGPUDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGPUDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncGPUDropletsResourceWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        object with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
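+
+        A minimal sketch of a single-Droplet call (hypothetical; assumes an async
+        client named `client` that mounts this resource as `gpu_droplets`, with
+        illustrative slug values):
+
+            droplet = await client.gpu_droplets.create(
+                name="example.com",
+                region="nyc3",
+                size="s-1vcpu-1gb",
+                image="ubuntu-22-04-x64",
+            )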
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          name: The human-readable string you wish to use when displaying the Droplet name. The
+              name, if set to a domain name managed in the DigitalOcean DNS management system,
+              will configure a PTR record for the Droplet. The name set during creation will
+              also determine the hostname for the Droplet in its internal configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+              `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+              the Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        names: List[str],
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        object with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
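+
+        A minimal multi-Droplet sketch (hypothetical; `client` and the slug values are
+        illustrative, as above):
+
+            droplets = await client.gpu_droplets.create(
+                names=["web-01", "web-02"],
+                region="nyc3",
+                size="s-1vcpu-1gb",
+                image="ubuntu-22-04-x64",
+            )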
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          names: An array of human-readable strings you wish to use when displaying the
+              Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS
+              management system, will configure a PTR record for the Droplet. Each name set
+              during creation will also determine the hostname for the Droplet in its internal
+              configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+              `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+              the Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["image", "name", "size"], ["image", "names", "size"])
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str | NotGiven = NOT_GIVEN,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        names: List[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + return cast( + GPUDropletCreateResponse, + await self._post( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + body=await async_maybe_transform( + { + "image": image, + "name": name, + "size": size, + "backup_policy": backup_policy, + "backups": backups, + "ipv6": ipv6, + "monitoring": monitoring, + "private_networking": private_networking, + "region": region, + "ssh_keys": ssh_keys, + "tags": tags, + "user_data": user_data, + "volumes": volumes, + "vpc_uuid": vpc_uuid, + "with_droplet_agent": with_droplet_agent, + "names": names, + }, + gpu_droplet_create_params.GPUDropletCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, GPUDropletCreateResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + async def retrieve( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletRetrieveResponse: + """ + To show information about an individual Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletRetrieveResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListResponse: + """ + To list all Droplets in your account, send a GET request to `/v2/droplets`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing objects each representing a Droplet. These will + contain the standard Droplet attributes. + + ### Filtering Results by Tag + + It's possible to request filtered results by including certain query parameters. + To only list Droplets assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. 
For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + ### GPU Droplets + + By default, only non-GPU Droplets are returned. To list only GPU Droplets, set + the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`. + + Args: + name: Used to filter list response by Droplet name returning only exact matches. It is + case-insensitive and can not be combined with `tag_name`. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, + only non-GPU Droplets are returned. Can not be combined with `tag_name`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + "tag_name": tag_name, + "type": type, + }, + gpu_droplet_list_params.GPUDropletListParams, + ), + ), + cast_to=GPUDropletListResponse, + ) + + async def delete( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_by_tag( + self, + *, + tag_name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete **all** Droplets assigned to a specific tag, include the `tag_name` + query parameter set to the name of the tag in your DELETE request. 
For example,
+        `/v2/droplets?tag_name=$TAG_NAME`.
+
+        This endpoint requires `tag:read` scope.
+
+        A successful request will receive a 204 status code with no body in response.
+        This indicates that the request was processed successfully.
+
+        Args:
+          tag_name: Specifies Droplets to be deleted by tag.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+    async def list_firewalls(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListFirewallsResponse:
+        """
+        To retrieve a list of all firewalls available to a Droplet, send a GET request
+        to `/v2/droplets/$DROPLET_ID/firewalls`.
+
+        The response will be a JSON object that has a key called `firewalls`. This will
+        be set to an array of `firewall` objects, each of which contains the standard
+        `firewall` attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/firewalls"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams,
+                ),
+            ),
+            cast_to=GPUDropletListFirewallsResponse,
+        )
+
+    async def list_kernels(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListKernelsResponse:
+        """
+        To retrieve a list of all kernels available to a Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/kernels`.
+
+        The response will be a JSON object that has a key called `kernels`. This will be
+        set to an array of `kernel` objects, each of which contains the standard `kernel`
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/kernels"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_kernels_params.GPUDropletListKernelsParams,
+                ),
+            ),
+            cast_to=GPUDropletListKernelsResponse,
+        )
+
+    async def list_neighbors(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListNeighborsResponse:
+        """To retrieve a list of any "neighbors" (i.e.
+
+        Droplets that are co-located on the
+        same physical hardware) for a specific Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/neighbors`.
+
+        The results will be returned as a JSON object with a key of `droplets`. This
+        will be set to an array containing objects representing any other Droplets that
+        share the same physical hardware. An empty array indicates that the Droplet is
+        not co-located with any other Droplets associated with your account.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/neighbors"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=GPUDropletListNeighborsResponse,
+        )
+
+    async def list_snapshots(
+        self,
+        droplet_id: int,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListSnapshotsResponse:
+        """
+        To retrieve the snapshots that have been created from a Droplet, send a GET
+        request to `/v2/droplets/$DROPLET_ID/snapshots`.
+
+        You will get back a JSON object that has a `snapshots` key. This will be set to
+        an array of snapshot objects, each of which contains the standard Droplet
+        snapshot attributes.
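+
+        For example (a hypothetical call; assumes a configured async client named
+        `client`):
+
+            snapshots = await client.gpu_droplets.list_snapshots(droplet_id=3164444, page=1)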
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/snapshots"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams,
+                ),
+            ),
+            cast_to=GPUDropletListSnapshotsResponse,
+        )
+
+
+class GPUDropletsResourceWithRawResponse:
+    def __init__(self, gpu_droplets: GPUDropletsResource) -> None:
+        self._gpu_droplets = gpu_droplets
+
+        self.create = to_raw_response_wrapper(
+            gpu_droplets.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            gpu_droplets.retrieve,
+        )
+        self.list = to_raw_response_wrapper(
+            gpu_droplets.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            gpu_droplets.delete,
+        )
+        self.delete_by_tag = to_raw_response_wrapper(
+            gpu_droplets.delete_by_tag,
+        )
+        self.list_firewalls = to_raw_response_wrapper(
+            gpu_droplets.list_firewalls,
+        )
+        self.list_kernels = to_raw_response_wrapper(
+            gpu_droplets.list_kernels,
+        )
+        self.list_neighbors = to_raw_response_wrapper(
+            gpu_droplets.list_neighbors,
+        )
+        self.list_snapshots = to_raw_response_wrapper(
+            gpu_droplets.list_snapshots,
+        )
+
+    @cached_property
+    def backups(self) -> BackupsResourceWithRawResponse:
+        return BackupsResourceWithRawResponse(self._gpu_droplets.backups)
+
+    @cached_property
+    def actions(self) -> ActionsResourceWithRawResponse:
+        return ActionsResourceWithRawResponse(self._gpu_droplets.actions)
+
+    @cached_property
+    def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse:
+        return DestroyWithAssociatedResourcesResourceWithRawResponse(
+            self._gpu_droplets.destroy_with_associated_resources
+        )
+
+    @cached_property
+    def autoscale(self) -> AutoscaleResourceWithRawResponse:
+        return AutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale)
+
+    @cached_property
+    def firewalls(self) -> FirewallsResourceWithRawResponse:
+        return FirewallsResourceWithRawResponse(self._gpu_droplets.firewalls)
+
+    @cached_property
+    def floating_ips(self) -> FloatingIPsResourceWithRawResponse:
+        return FloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips)
+
+    @cached_property
+    def images(self) -> ImagesResourceWithRawResponse:
+        return ImagesResourceWithRawResponse(self._gpu_droplets.images)
+
+    @cached_property
+    def load_balancers(self) -> LoadBalancersResourceWithRawResponse:
+        return
LoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> SizesResourceWithRawResponse: + return SizesResourceWithRawResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithRawResponse: + return SnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> VolumesResourceWithRawResponse: + return VolumesResourceWithRawResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AccountResourceWithRawResponse: + return AccountResourceWithRawResponse(self._gpu_droplets.account) + + +class AsyncGPUDropletsResourceWithRawResponse: + def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = async_to_raw_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = async_to_raw_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = async_to_raw_response_wrapper( + gpu_droplets.list, + ) + self.delete = async_to_raw_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = async_to_raw_response_wrapper( + gpu_droplets.delete_by_tag, + ) + self.list_firewalls = async_to_raw_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = async_to_raw_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = async_to_raw_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = async_to_raw_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> AsyncBackupsResourceWithRawResponse: + return AsyncBackupsResourceWithRawResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: + return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResourceWithRawResponse: + return AsyncAutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> AsyncFirewallsResourceWithRawResponse: + return AsyncFirewallsResourceWithRawResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResourceWithRawResponse: + return AsyncFloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> AsyncImagesResourceWithRawResponse: + return AsyncImagesResourceWithRawResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResourceWithRawResponse: + return AsyncLoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> AsyncSizesResourceWithRawResponse: + return AsyncSizesResourceWithRawResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse: + return AsyncSnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> AsyncVolumesResourceWithRawResponse: + return AsyncVolumesResourceWithRawResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AsyncAccountResourceWithRawResponse: + return 
AsyncAccountResourceWithRawResponse(self._gpu_droplets.account) + + +class GPUDropletsResourceWithStreamingResponse: + def __init__(self, gpu_droplets: GPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = to_streamed_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = to_streamed_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = to_streamed_response_wrapper( + gpu_droplets.list, + ) + self.delete = to_streamed_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = to_streamed_response_wrapper( + gpu_droplets.delete_by_tag, + ) + self.list_firewalls = to_streamed_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = to_streamed_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = to_streamed_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = to_streamed_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> BackupsResourceWithStreamingResponse: + return BackupsResourceWithStreamingResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: + return DestroyWithAssociatedResourcesResourceWithStreamingResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AutoscaleResourceWithStreamingResponse: + return AutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> FirewallsResourceWithStreamingResponse: + return FirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> FloatingIPsResourceWithStreamingResponse: + return FloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> ImagesResourceWithStreamingResponse: + return ImagesResourceWithStreamingResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> LoadBalancersResourceWithStreamingResponse: + return LoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> SizesResourceWithStreamingResponse: + return SizesResourceWithStreamingResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithStreamingResponse: + return SnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> VolumesResourceWithStreamingResponse: + return VolumesResourceWithStreamingResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AccountResourceWithStreamingResponse: + return AccountResourceWithStreamingResponse(self._gpu_droplets.account) + + +class AsyncGPUDropletsResourceWithStreamingResponse: + def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = async_to_streamed_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + gpu_droplets.list, + ) + self.delete = async_to_streamed_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = async_to_streamed_response_wrapper( + gpu_droplets.delete_by_tag, 
+ ) + self.list_firewalls = async_to_streamed_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = async_to_streamed_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = async_to_streamed_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = async_to_streamed_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> AsyncBackupsResourceWithStreamingResponse: + return AsyncBackupsResourceWithStreamingResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: + return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResourceWithStreamingResponse: + return AsyncAutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> AsyncFirewallsResourceWithStreamingResponse: + return AsyncFirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResourceWithStreamingResponse: + return AsyncFloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> AsyncImagesResourceWithStreamingResponse: + return AsyncImagesResourceWithStreamingResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResourceWithStreamingResponse: + return AsyncLoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> AsyncSizesResourceWithStreamingResponse: + return AsyncSizesResourceWithStreamingResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: + return AsyncSnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> AsyncVolumesResourceWithStreamingResponse: + return AsyncVolumesResourceWithStreamingResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AsyncAccountResourceWithStreamingResponse: + return AsyncAccountResourceWithStreamingResponse(self._gpu_droplets.account) diff --git a/src/gradientai/resources/gpu_droplets/images/__init__.py b/src/gradientai/resources/gpu_droplets/images/__init__.py new file mode 100644 index 00000000..477fd657 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/images/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/images/actions.py b/src/gradientai/resources/gpu_droplets/images/actions.py new file mode 100644 index 00000000..9428418b --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/images/actions.py @@ -0,0 +1,560 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, overload + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.shared.action import Action +from ....types.gpu_droplets.images import action_create_params +from ....types.gpu_droplets.images.action_list_response import ActionListResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + @overload + def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + The following actions are available on an Image. 
+
+        ## Convert an Image to a Snapshot
+
+        To convert an image (for example, a backup) to a snapshot, send a POST request
+        to `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+        ## Transfer an Image
+
+        To transfer an image to another region, send a POST request to
+        `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+        the `region` attribute to the slug identifier of the region you wish to
+        transfer to.
+
+        Args:
+          type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def create(
+        self,
+        image_id: int,
+        *,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ],
+        type: Literal["convert", "transfer"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Action:
+        """
+        The following actions are available on an Image.
+
+        ## Convert an Image to a Snapshot
+
+        To convert an image (for example, a backup) to a snapshot, send a POST request
+        to `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+        ## Transfer an Image
+
+        To transfer an image to another region, send a POST request to
+        `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+        the `region` attribute to the slug identifier of the region you wish to
+        transfer to.
+
+        Args:
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["type"], ["region", "type"])
+    def create(
+        self,
+        image_id: int,
+        *,
+        type: Literal["convert", "transfer"],
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + return self._post( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + body=maybe_transform( + { + "type": type, + "region": region, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + def retrieve( + self, + action_id: int, + *, + image_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + To retrieve the status of an image action, send a GET request to + `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + def list( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on an image, send a GET request + to `/v2/images/$IMAGE_ID/actions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
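+
+        A hypothetical sketch (assumes a configured async client named `client` that
+        mounts this resource as `client.gpu_droplets.images.actions`):
+
+            response = await client.gpu_droplets.images.actions.with_raw_response.list(image_id=62137902)
+            print(response.headers)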
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncActionsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncActionsResourceWithStreamingResponse(self)
+
+    @overload
+    async def create(
+        self,
+        image_id: int,
+        *,
+        type: Literal["convert", "transfer"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Action:
+        """
+        The following actions are available on an Image.
+
+        ## Convert an Image to a Snapshot
+
+        To convert an image (for example, a backup) to a snapshot, send a POST request
+        to `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+        ## Transfer an Image
+
+        To transfer an image to another region, send a POST request to
+        `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+        the `region` attribute to the slug identifier of the region you wish to
+        transfer to.
+
+        Args:
+          type: The action to be taken on the image. Can be either `convert` or `transfer`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        image_id: int,
+        *,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ],
+        type: Literal["convert", "transfer"],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Action:
+        """
+        The following actions are available on an Image.
+
+        ## Convert an Image to a Snapshot
+
+        To convert an image (for example, a backup) to a snapshot, send a POST request
+        to `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`.
+
+        ## Transfer an Image
+
+        To transfer an image to another region, send a POST request to
+        `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set
+        the `region` attribute to the slug identifier of the region you wish to
+        transfer to.
+
+        Args:
+          region: The slug identifier for the region where the resource will initially be
+              available.
+
+          type: The action to be taken on the image. Can be either `convert` or `transfer`.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"], ["region", "type"]) + async def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + return await self._post( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + body=await async_maybe_transform( + { + "type": type, + "region": region, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + async def retrieve( + self, + action_id: int, + *, + image_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + To retrieve the status of an image action, send a GET request to + `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + async def list( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on an image, send a GET request + to `/v2/images/$IMAGE_ID/actions`. 
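+
+        For example (hypothetical usage, with an illustrative image ID; assumes a
+        configured async client named `client`):
+
+            actions = await client.gpu_droplets.images.actions.list(image_id=62137902)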
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_raw_response_wrapper( + actions.create, + ) + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_raw_response_wrapper( + actions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) diff --git a/src/gradientai/resources/gpu_droplets/images/images.py b/src/gradientai/resources/gpu_droplets/images/images.py new file mode 100644 index 00000000..2c70e793 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/images/images.py @@ -0,0 +1,867 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
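+#
+# Usage sketch (illustrative, not generated output; assumes a configured client
+# with this resource mounted at `client.gpu_droplets.images`, and placeholder
+# values throughout):
+#
+#     image = client.gpu_droplets.images.create(
+#         name="example-image",
+#         url="https://example.com/disk.qcow2",  # raw, qcow2, vhdx, vdi, or vmdk
+#         region="nyc3",
+#     )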
+ +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import image_list_params, image_create_params, image_update_params +from ....types.gpu_droplets.image_list_response import ImageListResponse +from ....types.gpu_droplets.image_create_response import ImageCreateResponse +from ....types.gpu_droplets.image_update_response import ImageUpdateResponse +from ....types.gpu_droplets.image_retrieve_response import ImageRetrieveResponse + +__all__ = ["ImagesResource", "AsyncImagesResource"] + + +class ImagesResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ImagesResourceWithStreamingResponse(self) + + def create( + self, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageCreateResponse: + """To create a new custom image, send a POST request to /v2/images. + + The body must + contain a url attribute pointing to a Linux virtual machine image to be imported + into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk + format. 
It may be compressed using gzip or bzip2 and must be smaller than 100 GB + after being decompressed. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + url: A URL from which the custom Linux virtual machine image may be retrieved. The + image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may + be compressed using gzip or bzip2 and must be smaller than 100 GB after being + decompressed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + body=maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + "region": region, + "tags": tags, + "url": url, + }, + image_create_params.ImageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageCreateResponse, + ) + + def retrieve( + self, + image_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageRetrieveResponse: + """ + To retrieve information about an image, send a `GET` request to + `/v2/images/$IDENTIFIER`. 
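+
+        For example, a minimal sketch (the identifier may be an image ID or a
+        slug; the value below is a placeholder):
+
+            image = client.gpu_droplets.images.retrieve(6918990)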
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageRetrieveResponse, + ) + + def update( + self, + image_id: int, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageUpdateResponse: + """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. + + Set the + `name` attribute to the new value you would like to use. For custom images, the + `description` and `distribution` attributes may also be updated. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._put( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + body=maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + }, + image_update_params.ImageUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + private: bool | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageListResponse: + """ + To list all of the images available on your account, send a GET request to + /v2/images. + + ## Filtering Results + + --- + + It's possible to request filtered results by including certain query parameters. + + **Image Type** + + Either 1-Click Application or OS Distribution images can be filtered by using + the `type` query parameter. + + > Important: The `type` query parameter does not directly relate to the `type` + > attribute. + + To retrieve only **_distribution_** images, include the `type` query parameter + set to distribution, `/v2/images?type=distribution`. + + To retrieve only **_application_** images, include the `type` query parameter + set to application, `/v2/images?type=application`. + + **User Images** + + To retrieve only the private images of a user, include the `private` query + parameter set to true, `/v2/images?private=true`. + + **Tags** + + To list all images assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/images?tag_name=$TAG_NAME`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + private: Used to filter only user images. + + tag_name: Used to filter images by a specific tag. + + type: Filters results based on image type which can be either `application` or + `distribution`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "private": private, + "tag_name": tag_name, + "type": type, + }, + image_list_params.ImageListParams, + ), + ), + cast_to=ImageListResponse, + ) + + def delete( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a snapshot or custom image, send a `DELETE` request to + `/v2/images/$IMAGE_ID`. 
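+
+        For example, a minimal sketch (the image ID is a placeholder; the call
+        returns `None`, as the API sends back no body on success):
+
+            client.gpu_droplets.images.delete(6372321)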
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncImagesResource(AsyncAPIResource): + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncImagesResourceWithStreamingResponse(self) + + async def create( + self, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageCreateResponse: + """To create a new custom image, send a POST request to /v2/images. + + The body must + contain a url attribute pointing to a Linux virtual machine image to be imported + into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk + format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB + after being decompressed. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. 
This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + url: A URL from which the custom Linux virtual machine image may be retrieved. The + image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may + be compressed using gzip or bzip2 and must be smaller than 100 GB after being + decompressed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + body=await async_maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + "region": region, + "tags": tags, + "url": url, + }, + image_create_params.ImageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageCreateResponse, + ) + + async def retrieve( + self, + image_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageRetrieveResponse: + """ + To retrieve information about an image, send a `GET` request to + `/v2/images/$IDENTIFIER`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageRetrieveResponse, + ) + + async def update( + self, + image_id: int, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageUpdateResponse: + """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. + + Set the + `name` attribute to the new value you would like to use. 
For custom images, the + `description` and `distribution` attributes may also be updated. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._put( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + body=await async_maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + }, + image_update_params.ImageUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + private: bool | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageListResponse: + """ + To list all of the images available on your account, send a GET request to + /v2/images. + + ## Filtering Results + + --- + + It's possible to request filtered results by including certain query parameters. + + **Image Type** + + Either 1-Click Application or OS Distribution images can be filtered by using + the `type` query parameter. + + > Important: The `type` query parameter does not directly relate to the `type` + > attribute. + + To retrieve only **_distribution_** images, include the `type` query parameter + set to distribution, `/v2/images?type=distribution`. + + To retrieve only **_application_** images, include the `type` query parameter + set to application, `/v2/images?type=application`. + + **User Images** + + To retrieve only the private images of a user, include the `private` query + parameter set to true, `/v2/images?private=true`. + + **Tags** + + To list all images assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/images?tag_name=$TAG_NAME`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + private: Used to filter only user images. + + tag_name: Used to filter images by a specific tag. + + type: Filters results based on image type which can be either `application` or + `distribution`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "private": private, + "tag_name": tag_name, + "type": type, + }, + image_list_params.ImageListParams, + ), + ), + cast_to=ImageListResponse, + ) + + async def delete( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a snapshot or custom image, send a `DELETE` request to + `/v2/images/$IMAGE_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create = to_raw_response_wrapper( + images.create, + ) + self.retrieve = to_raw_response_wrapper( + images.retrieve, + ) + self.update = to_raw_response_wrapper( + images.update, + ) + self.list = to_raw_response_wrapper( + images.list, + ) + self.delete = to_raw_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._images.actions) + + +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create = async_to_raw_response_wrapper( + images.create, + ) + self.retrieve = async_to_raw_response_wrapper( + images.retrieve, + ) + self.update = async_to_raw_response_wrapper( + images.update, + ) + self.list = async_to_raw_response_wrapper( + images.list, + ) + self.delete = async_to_raw_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._images.actions) + + +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create = to_streamed_response_wrapper( + images.create, + ) + self.retrieve = to_streamed_response_wrapper( + images.retrieve, + ) + self.update = to_streamed_response_wrapper( + images.update, + ) + self.list = to_streamed_response_wrapper( + images.list, + ) + self.delete = 
to_streamed_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._images.actions) + + +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create = async_to_streamed_response_wrapper( + images.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + images.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + images.update, + ) + self.list = async_to_streamed_response_wrapper( + images.list, + ) + self.delete = async_to_streamed_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._images.actions) diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..2cede1c8 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from .load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from .forwarding_rules import ( + ForwardingRulesResource, + AsyncForwardingRulesResource, + ForwardingRulesResourceWithRawResponse, + AsyncForwardingRulesResourceWithRawResponse, + ForwardingRulesResourceWithStreamingResponse, + AsyncForwardingRulesResourceWithStreamingResponse, +) + +__all__ = [ + "DropletsResource", + "AsyncDropletsResource", + "DropletsResourceWithRawResponse", + "AsyncDropletsResourceWithRawResponse", + "DropletsResourceWithStreamingResponse", + "AsyncDropletsResourceWithStreamingResponse", + "ForwardingRulesResource", + "AsyncForwardingRulesResource", + "ForwardingRulesResourceWithRawResponse", + "AsyncForwardingRulesResourceWithRawResponse", + "ForwardingRulesResourceWithStreamingResponse", + "AsyncForwardingRulesResourceWithStreamingResponse", + "LoadBalancersResource", + "AsyncLoadBalancersResource", + "LoadBalancersResourceWithRawResponse", + "AsyncLoadBalancersResourceWithRawResponse", + "LoadBalancersResourceWithStreamingResponse", + "AsyncLoadBalancersResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py new file mode 100644 index 00000000..2553a729 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py @@ -0,0 +1,302 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
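+#
+# Usage sketch (illustrative, not generated output; the load balancer ID and
+# Droplet IDs are placeholders):
+#
+#     client.gpu_droplets.load_balancers.droplets.add(
+#         lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+#         droplet_ids=[3164444, 3164445],
+#     )  # returns None; success is signalled by a 204 status code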
+ +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.load_balancers import droplet_add_params, droplet_remove_params + +__all__ = ["DropletsResource", "AsyncDropletsResource"] + + +class DropletsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DropletsResourceWithStreamingResponse(self) + + def add( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a load balancer instance, send a POST request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + Individual Droplets can not be added to a load balancer configured with a + Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" + response from the API. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a load balancer instance, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDropletsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDropletsResourceWithStreamingResponse(self) + + async def add( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a load balancer instance, send a POST request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + Individual Droplets can not be added to a load balancer configured with a + Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" + response from the API. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a load balancer instance, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DropletsResourceWithRawResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_raw_response_wrapper( + droplets.add, + ) + self.remove = to_raw_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithRawResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_raw_response_wrapper( + droplets.add, + ) + self.remove = async_to_raw_response_wrapper( + droplets.remove, + ) + + +class DropletsResourceWithStreamingResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_streamed_response_wrapper( + droplets.add, + ) + self.remove = to_streamed_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithStreamingResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_streamed_response_wrapper( + droplets.add, + ) + self.remove = async_to_streamed_response_wrapper( + droplets.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py new file mode 100644 index 00000000..2ba20f88 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py @@ -0,0 +1,301 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.load_balancers import forwarding_rule_add_params, forwarding_rule_remove_params +from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRulesResource", "AsyncForwardingRulesResource"] + + +class ForwardingRulesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ForwardingRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ForwardingRulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ForwardingRulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ForwardingRulesResourceWithStreamingResponse(self) + + def add( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add an additional forwarding rule to a load balancer instance, send a POST + request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body + of the request, there should be a `forwarding_rules` attribute containing an + array of rules to be added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove forwarding rules from a load balancer instance, send a DELETE request + to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the + request, there should be a `forwarding_rules` attribute containing an array of + rules to be removed. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
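+
+        For example, a minimal sketch (the load balancer ID is a placeholder,
+        and the rule fields follow the forwarding rule schema used elsewhere
+        in this API):
+
+            client.gpu_droplets.load_balancers.forwarding_rules.remove(
+                lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+                forwarding_rules=[
+                    {
+                        "entry_protocol": "tcp",
+                        "entry_port": 3306,
+                        "target_protocol": "tcp",
+                        "target_port": 3306,
+                    }
+                ],
+            )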
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncForwardingRulesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncForwardingRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncForwardingRulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncForwardingRulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncForwardingRulesResourceWithStreamingResponse(self) + + async def add( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add an additional forwarding rule to a load balancer instance, send a POST + request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body + of the request, there should be a `forwarding_rules` attribute containing an + array of rules to be added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=await async_maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove forwarding rules from a load balancer instance, send a DELETE request + to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the + request, there should be a `forwarding_rules` attribute containing an array of + rules to be removed. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=await async_maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class ForwardingRulesResourceWithRawResponse: + def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = to_raw_response_wrapper( + forwarding_rules.add, + ) + self.remove = to_raw_response_wrapper( + forwarding_rules.remove, + ) + + +class AsyncForwardingRulesResourceWithRawResponse: + def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = async_to_raw_response_wrapper( + forwarding_rules.add, + ) + self.remove = async_to_raw_response_wrapper( + forwarding_rules.remove, + ) + + +class ForwardingRulesResourceWithStreamingResponse: + def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = to_streamed_response_wrapper( + forwarding_rules.add, + ) + self.remove = to_streamed_response_wrapper( + forwarding_rules.remove, + ) + + +class AsyncForwardingRulesResourceWithStreamingResponse: + def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = async_to_streamed_response_wrapper( + forwarding_rules.add, + ) + self.remove = async_to_streamed_response_wrapper( + forwarding_rules.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py new file mode 100644 index 00000000..c724b6d9 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py @@ -0,0 +1,2205 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
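+#
+# Usage sketch (illustrative, not generated output; IDs and rule values are
+# placeholders, and rule fields follow the DigitalOcean forwarding rule schema):
+#
+#     lb = client.gpu_droplets.load_balancers.create(
+#         name="example-lb-01",
+#         region="nyc3",
+#         forwarding_rules=[
+#             {
+#                 "entry_protocol": "https",
+#                 "entry_port": 443,
+#                 "target_protocol": "http",
+#                 "target_port": 80,
+#             }
+#         ],
+#         droplet_ids=[3164444, 3164445],
+#     )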
+ +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import Literal, overload + +import httpx + +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from .forwarding_rules import ( + ForwardingRulesResource, + AsyncForwardingRulesResource, + ForwardingRulesResourceWithRawResponse, + AsyncForwardingRulesResourceWithRawResponse, + ForwardingRulesResourceWithStreamingResponse, + AsyncForwardingRulesResourceWithStreamingResponse, +) +from ....types.gpu_droplets import ( + load_balancer_list_params, + load_balancer_create_params, + load_balancer_update_params, +) +from ....types.gpu_droplets.domains_param import DomainsParam +from ....types.gpu_droplets.lb_firewall_param import LbFirewallParam +from ....types.gpu_droplets.glb_settings_param import GlbSettingsParam +from ....types.gpu_droplets.health_check_param import HealthCheckParam +from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam +from ....types.gpu_droplets.sticky_sessions_param import StickySessionsParam +from ....types.gpu_droplets.load_balancer_list_response import LoadBalancerListResponse +from ....types.gpu_droplets.load_balancer_create_response import LoadBalancerCreateResponse +from ....types.gpu_droplets.load_balancer_update_response import LoadBalancerUpdateResponse +from ....types.gpu_droplets.load_balancer_retrieve_response import LoadBalancerRetrieveResponse + +__all__ = ["LoadBalancersResource", "AsyncLoadBalancersResource"] + + +class LoadBalancersResource(SyncAPIResource): + @cached_property + def droplets(self) -> DropletsResource: + return DropletsResource(self._client) + + @cached_property + def forwarding_rules(self) -> ForwardingRulesResource: + return ForwardingRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> LoadBalancersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return LoadBalancersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> LoadBalancersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return LoadBalancersResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. 
+ + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
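For orientation, a minimal usage sketch of the `droplet_ids` form of this overload. It is illustrative only: the `GradientAI` client class name and the `gpu_droplets.load_balancers` accessor path are assumptions inferred from this diff's module layout, and the forwarding-rule keys mirror the public DigitalOcean load balancer schema rather than anything confirmed in this patch.

```python
# Hypothetical sketch: create a load balancer backed by specific Droplet IDs.
# `GradientAI` and the accessor path are assumed from this diff's layout.
from gradientai import GradientAI

client = GradientAI()  # assumes the API token is picked up from the environment

lb = client.gpu_droplets.load_balancers.create(
    name="example-lb-01",
    region="nyc3",
    forwarding_rules=[
        {
            "entry_protocol": "https",
            "entry_port": 443,
            "target_protocol": "http",
            "target_port": 80,
        }
    ],
    droplet_ids=[3164444, 3164445],  # mutually exclusive with `tag`
)
```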
+ + @overload + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. 
+ + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
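The second overload swaps `droplet_ids` for `tag`. A sketch under the same naming assumptions; here membership is driven by a Droplet tag, so Droplets tagged later join the pool automatically:

```python
# Hypothetical sketch: tag-based membership instead of explicit Droplet IDs.
from gradientai import GradientAI

client = GradientAI()

lb = client.gpu_droplets.load_balancers.create(
    name="example-lb-02",
    region="nyc3",
    forwarding_rules=[
        {
            "entry_protocol": "http",
            "entry_port": 80,
            "target_protocol": "http",
            "target_port": 8080,
        }
    ],
    tag="web",  # mutually exclusive with `droplet_ids`
)
```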
+ + @required_args(["forwarding_rules"]) + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + return self._post( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + body=maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_create_params.LoadBalancerCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerCreateResponse, + ) + + def retrieve( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerRetrieveResponse:
+        """
+        To show information about a load balancer instance, send a GET request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        return self._get(
+            f"/v2/load_balancers/{lb_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=LoadBalancerRetrieveResponse,
+        )
+
+    @overload
+    def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN,
+        disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN,
+        domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN,
+        droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN,
+        enable_backend_keepalive: bool | NotGiven = NOT_GIVEN,
+        enable_proxy_protocol: bool | NotGiven = NOT_GIVEN,
+        firewall: LbFirewallParam | NotGiven = NOT_GIVEN,
+        glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN,
+        health_check: HealthCheckParam | NotGiven = NOT_GIVEN,
+        http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN,
+        network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        redirect_http_to_https: bool | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN,
+        size_unit: int | NotGiven = NOT_GIVEN,
+        sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN,
+        target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplet_ids` or `tag` attributes as they are mutually
+        exclusive.
**Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. 
The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN,
+        disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN,
+        domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN,
+        enable_backend_keepalive: bool | NotGiven = NOT_GIVEN,
+        enable_proxy_protocol: bool | NotGiven = NOT_GIVEN,
+        firewall: LbFirewallParam | NotGiven = NOT_GIVEN,
+        glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN,
+        health_check: HealthCheckParam | NotGiven = NOT_GIVEN,
+        http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN,
+        network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        redirect_http_to_https: bool | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN,
+        size_unit: int | NotGiven = NOT_GIVEN,
+        sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN,
+        tag: str | NotGiven = NOT_GIVEN,
+        target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplet_ids` or `tag` attributes as they are mutually
+        exclusive. **Note that any attribute that is not provided will be reset to its
+        default value.**
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+ + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["forwarding_rules"]) + def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return self._put( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + body=maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_update_params.LoadBalancerUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerListResponse: + """ + To list all of the load balancer instances on your account, send a GET request + to `/v2/load_balancers`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + load_balancer_list_params.LoadBalancerListParams, + ), + ), + cast_to=LoadBalancerListResponse, + ) + + def delete( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a load balancer instance, disassociating any Droplets assigned to it + and removing it from your account, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_cache( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Global load balancer CDN cache, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/cache" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncLoadBalancersResource(AsyncAPIResource): + @cached_property + def droplets(self) -> AsyncDropletsResource: + return AsyncDropletsResource(self._client) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResource: + return AsyncForwardingRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncLoadBalancersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncLoadBalancersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncLoadBalancersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncLoadBalancersResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. 
+ + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. 
+ + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
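The async resource mirrors the sync surface call for call. A sketch of the awaited variant, assuming the async client class is named `AsyncGradientAI`; as above, the accessor path and forwarding-rule keys are inferred, not confirmed by this patch:

```python
# Hypothetical sketch of the async variant; `AsyncGradientAI` is assumed.
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()  # assumes the API token is read from the environment
    lb = await client.gpu_droplets.load_balancers.create(
        name="example-lb-03",
        region="nyc3",
        forwarding_rules=[
            {
                "entry_protocol": "http",
                "entry_port": 80,
                "target_protocol": "http",
                "target_port": 80,
            }
        ],
        droplet_ids=[3164444],  # mutually exclusive with `tag`
    )
    print(lb)


asyncio.run(main())
```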
+ + @required_args(["forwarding_rules"]) + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + return await self._post( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + body=await async_maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_create_params.LoadBalancerCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerCreateResponse, + ) + + async def retrieve( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerRetrieveResponse:
+        """
+        To show information about a load balancer instance, send a GET request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not lb_id:
+            raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}")
+        return await self._get(
+            f"/v2/load_balancers/{lb_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=LoadBalancerRetrieveResponse,
+        )
+
+    @overload
+    async def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN,
+        disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN,
+        domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN,
+        droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN,
+        enable_backend_keepalive: bool | NotGiven = NOT_GIVEN,
+        enable_proxy_protocol: bool | NotGiven = NOT_GIVEN,
+        firewall: LbFirewallParam | NotGiven = NOT_GIVEN,
+        glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN,
+        health_check: HealthCheckParam | NotGiven = NOT_GIVEN,
+        http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN,
+        network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        redirect_http_to_https: bool | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN,
+        size_unit: int | NotGiven = NOT_GIVEN,
+        sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN,
+        target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplet_ids` or `tag` attributes as they are mutually
+        exclusive.
**Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. 
The possible values are `DEFAULT` or `STRONG`. The default value is
+              `DEFAULT`.
+
+          type: A string indicating whether the load balancer should be a standard regional HTTP
+              load balancer, a regional network load balancer that routes traffic at the
+              TCP/UDP transport layer, or a global load balancer.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def update(
+        self,
+        lb_id: str,
+        *,
+        forwarding_rules: Iterable[ForwardingRuleParam],
+        algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN,
+        disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN,
+        domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN,
+        enable_backend_keepalive: bool | NotGiven = NOT_GIVEN,
+        enable_proxy_protocol: bool | NotGiven = NOT_GIVEN,
+        firewall: LbFirewallParam | NotGiven = NOT_GIVEN,
+        glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN,
+        health_check: HealthCheckParam | NotGiven = NOT_GIVEN,
+        http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN,
+        name: str | NotGiven = NOT_GIVEN,
+        network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN,
+        network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN,
+        project_id: str | NotGiven = NOT_GIVEN,
+        redirect_http_to_https: bool | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN,
+        size_unit: int | NotGiven = NOT_GIVEN,
+        sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN,
+        tag: str | NotGiven = NOT_GIVEN,
+        target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN,
+        tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN,
+        type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> LoadBalancerUpdateResponse:
+        """
+        To update a load balancer's settings, send a PUT request to
+        `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full
+        representation of the load balancer including existing attributes. It may
+        contain _one of_ the `droplet_ids` or `tag` attributes as they are mutually
+        exclusive. **Note that any attribute that is not provided will be reset to its
+        default value.**
+
+        Args:
+          forwarding_rules: An array of objects specifying the forwarding rules for a load balancer.
+
+          algorithm: This field has been deprecated. You can no longer specify an algorithm for load
+              balancers.
+
+          disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for
+              Let's Encrypt certificates that are added to the load balancer.
+ + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["forwarding_rules"]) + async def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return await self._put( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + body=await async_maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_update_params.LoadBalancerUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerListResponse: + """ + To list all of the load balancer instances on your account, send a GET request + to `/v2/load_balancers`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + load_balancer_list_params.LoadBalancerListParams, + ), + ), + cast_to=LoadBalancerListResponse, + ) + + async def delete( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
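Because `update` is a PUT whose omitted attributes are reset to their defaults, the safe calling pattern is to send the full desired configuration on every call. A sketch, reusing the assumed client from the `create` example and run inside the same coroutine:

    updated = await client.gpu_droplets.load_balancers.update(
        "4de7ac8b-495b-4884-9a69-1050c6793cd6",  # illustrative load balancer ID
        forwarding_rules=[  # required on every update, not just when it changes
            {
                "entry_protocol": "https",
                "entry_port": 443,
                "target_protocol": "http",
                "target_port": 80,
            }
        ],
        name="renamed-lb",
        region="nyc3",
        droplet_ids=[12345],  # supply either `droplet_ids` or `tag`, never both
    )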
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a load balancer instance, disassociating any Droplets assigned to it + and removing it from your account, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_cache( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Global load balancer CDN cache, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/cache" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class LoadBalancersResourceWithRawResponse: + def __init__(self, load_balancers: LoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = to_raw_response_wrapper( + load_balancers.create, + ) + self.retrieve = to_raw_response_wrapper( + load_balancers.retrieve, + ) + self.update = to_raw_response_wrapper( + load_balancers.update, + ) + self.list = to_raw_response_wrapper( + load_balancers.list, + ) + self.delete = to_raw_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = to_raw_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithRawResponse: + return DropletsResourceWithRawResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> ForwardingRulesResourceWithRawResponse: + return ForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) + + +class AsyncLoadBalancersResourceWithRawResponse: + def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = async_to_raw_response_wrapper( + load_balancers.create, + ) + self.retrieve = async_to_raw_response_wrapper( + load_balancers.retrieve, + ) + self.update = async_to_raw_response_wrapper( + load_balancers.update, + ) + self.list = async_to_raw_response_wrapper( + load_balancers.list, + ) + self.delete = async_to_raw_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = async_to_raw_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithRawResponse: + return AsyncDropletsResourceWithRawResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResourceWithRawResponse: + return AsyncForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) + + +class LoadBalancersResourceWithStreamingResponse: + def __init__(self, load_balancers: LoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = to_streamed_response_wrapper( + load_balancers.create, + ) + self.retrieve = to_streamed_response_wrapper( + load_balancers.retrieve, + ) + self.update = to_streamed_response_wrapper( + load_balancers.update, + ) + self.list = to_streamed_response_wrapper( + load_balancers.list, + ) + self.delete = to_streamed_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = to_streamed_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithStreamingResponse: + return DropletsResourceWithStreamingResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> 
ForwardingRulesResourceWithStreamingResponse: + return ForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) + + +class AsyncLoadBalancersResourceWithStreamingResponse: + def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = async_to_streamed_response_wrapper( + load_balancers.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + load_balancers.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + load_balancers.update, + ) + self.list = async_to_streamed_response_wrapper( + load_balancers.list, + ) + self.delete = async_to_streamed_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = async_to_streamed_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: + return AsyncDropletsResourceWithStreamingResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResourceWithStreamingResponse: + return AsyncForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) diff --git a/src/gradientai/resources/gpu_droplets/sizes.py b/src/gradientai/resources/gpu_droplets/sizes.py new file mode 100644 index 00000000..e37116c7 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/sizes.py @@ -0,0 +1,199 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import size_list_params +from ...types.gpu_droplets.size_list_response import SizeListResponse + +__all__ = ["SizesResource", "AsyncSizesResource"] + + +class SizesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SizesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SizesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SizesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return SizesResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
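The `*WithRawResponse` and `*WithStreamingResponse` classes above wrap every method of the underlying resource without changing its call signature. A sketch of both variants against the `sizes.list` endpoint defined below, with the sync `GradientAI` client name assumed; `.parse()` is the wrapper's accessor for the typed model:

    from gradientai import GradientAI  # assumed sync client entry point

    client = GradientAI()

    # Raw variant: returns the HTTP response up front; .parse() yields SizeListResponse.
    response = client.gpu_droplets.sizes.with_raw_response.list()
    print(response.headers.get("ratelimit-remaining"))
    sizes = response.parse()

    # Streaming variant: a context manager that reads the body only on demand.
    with client.gpu_droplets.sizes.with_streaming_response.list(per_page=50) as response:
        print(response.status_code)
        data = response.parse()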
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SizeListResponse:
+        """To list all of the available Droplet sizes, send a GET request to `/v2/sizes`.
+
+        The response will be a JSON object with a key called `sizes`. The value of this
+        will be an array of `size` objects, each of which contains the standard size
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    size_list_params.SizeListParams,
+                ),
+            ),
+            cast_to=SizeListResponse,
+        )
+
+
+class AsyncSizesResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncSizesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncSizesResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return AsyncSizesResourceWithStreamingResponse(self)
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SizeListResponse:
+        """To list all of the available Droplet sizes, send a GET request to `/v2/sizes`.
+
+        The response will be a JSON object with a key called `sizes`. The value of this
+        will be an array of `size` objects, each of which contains the standard size
+        attributes.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + size_list_params.SizeListParams, + ), + ), + cast_to=SizeListResponse, + ) + + +class SizesResourceWithRawResponse: + def __init__(self, sizes: SizesResource) -> None: + self._sizes = sizes + + self.list = to_raw_response_wrapper( + sizes.list, + ) + + +class AsyncSizesResourceWithRawResponse: + def __init__(self, sizes: AsyncSizesResource) -> None: + self._sizes = sizes + + self.list = async_to_raw_response_wrapper( + sizes.list, + ) + + +class SizesResourceWithStreamingResponse: + def __init__(self, sizes: SizesResource) -> None: + self._sizes = sizes + + self.list = to_streamed_response_wrapper( + sizes.list, + ) + + +class AsyncSizesResourceWithStreamingResponse: + def __init__(self, sizes: AsyncSizesResource) -> None: + self._sizes = sizes + + self.list = async_to_streamed_response_wrapper( + sizes.list, + ) diff --git a/src/gradientai/resources/gpu_droplets/snapshots.py b/src/gradientai/resources/gpu_droplets/snapshots.py new file mode 100644 index 00000000..081ab5b8 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/snapshots.py @@ -0,0 +1,425 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import snapshot_list_params +from ...types.gpu_droplets.snapshot_list_response import SnapshotListResponse +from ...types.gpu_droplets.snapshot_retrieve_response import SnapshotRetrieveResponse + +__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] + + +class SnapshotsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return SnapshotsResourceWithStreamingResponse(self)
+
+    def retrieve(
+        self,
+        snapshot_id: Union[int, str],
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotRetrieveResponse:
+        """
+        To retrieve information about a snapshot, send a GET request to
+        `/v2/snapshots/$SNAPSHOT_ID`.
+
+        The response will be a JSON object with a key called `snapshot`. The value of
+        this will be a snapshot object containing the standard snapshot attributes.
+
+        Args:
+          snapshot_id: The ID of a Droplet snapshot.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/snapshots/{snapshot_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SnapshotRetrieveResponse,
+        )
+
+    def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotListResponse:
+        """
+        To list all of the snapshots available on your account, send a GET request to
+        `/v2/snapshots`.
+
+        The response will be a JSON object with a key called `snapshots`. This will be
+        set to an array of `snapshot` objects, each of which will contain the standard
+        snapshot attributes.
+
+        ### Filtering Results by Resource Type
+
+        It's possible to request filtered results by including certain query parameters.
+
+        #### List Droplet Snapshots
+
+        To retrieve only snapshots based on Droplets, include the `resource_type` query
+        parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`.
+
+        #### List Volume Snapshots
+
+        To retrieve only snapshots based on volumes, include the `resource_type` query
+        parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          resource_type: Used to filter snapshots by a resource type.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "resource_type": resource_type, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + def delete( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Both Droplet and volume snapshots are managed through the `/v2/snapshots/` + endpoint. To delete a snapshot, send a DELETE request to + `/v2/snapshots/$SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + snapshot_id: The ID of a Droplet snapshot. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncSnapshotsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSnapshotsResourceWithStreamingResponse(self) + + async def retrieve( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
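A sketch tying the snapshot endpoints together: a filtered, manually paged listing followed by a delete. The client and accessor names are assumed as before, and the `snapshots` attribute on the response is assumed to mirror the documented `snapshots` key:

    page = 1
    droplet_snapshots = []
    while True:
        result = client.gpu_droplets.snapshots.list(resource_type="droplet", page=page, per_page=100)
        droplet_snapshots.extend(result.snapshots)
        if len(result.snapshots) < 100:  # crude last-page check for illustration
            break
        page += 1

    # `snapshot_id` is Union[int, str]: Droplet snapshots use integer IDs, volume
    # snapshots use UUIDs. A successful delete returns None (HTTP 204).
    client.gpu_droplets.snapshots.delete(6372321)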
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotRetrieveResponse:
+        """
+        To retrieve information about a snapshot, send a GET request to
+        `/v2/snapshots/$SNAPSHOT_ID`.
+
+        The response will be a JSON object with a key called `snapshot`. The value of
+        this will be a snapshot object containing the standard snapshot attributes.
+
+        Args:
+          snapshot_id: The ID of a Droplet snapshot.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/snapshots/{snapshot_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SnapshotRetrieveResponse,
+        )
+
+    async def list(
+        self,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotListResponse:
+        """
+        To list all of the snapshots available on your account, send a GET request to
+        `/v2/snapshots`.
+
+        The response will be a JSON object with a key called `snapshots`. This will be
+        set to an array of `snapshot` objects, each of which will contain the standard
+        snapshot attributes.
+
+        ### Filtering Results by Resource Type
+
+        It's possible to request filtered results by including certain query parameters.
+
+        #### List Droplet Snapshots
+
+        To retrieve only snapshots based on Droplets, include the `resource_type` query
+        parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`.
+
+        #### List Volume Snapshots
+
+        To retrieve only snapshots based on volumes, include the `resource_type` query
+        parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          resource_type: Used to filter snapshots by a resource type.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "resource_type": resource_type, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + async def delete( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Both Droplet and volume snapshots are managed through the `/v2/snapshots/` + endpoint. To delete a snapshot, send a DELETE request to + `/v2/snapshots/$SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + snapshot_id: The ID of a Droplet snapshot. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class SnapshotsResourceWithRawResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = to_raw_response_wrapper( + snapshots.list, + ) + self.delete = to_raw_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithRawResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_raw_response_wrapper( + snapshots.list, + ) + self.delete = async_to_raw_response_wrapper( + snapshots.delete, + ) + + +class SnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = to_streamed_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + 
snapshots.list, + ) + self.delete = async_to_streamed_response_wrapper( + snapshots.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/__init__.py b/src/gradientai/resources/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..167db0b3 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "SnapshotsResource", + "AsyncSnapshotsResource", + "SnapshotsResourceWithRawResponse", + "AsyncSnapshotsResourceWithRawResponse", + "SnapshotsResourceWithStreamingResponse", + "AsyncSnapshotsResourceWithStreamingResponse", + "VolumesResource", + "AsyncVolumesResource", + "VolumesResourceWithRawResponse", + "AsyncVolumesResourceWithRawResponse", + "VolumesResourceWithStreamingResponse", + "AsyncVolumesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/volumes/actions.py b/src/gradientai/resources/gpu_droplets/volumes/actions.py new file mode 100644 index 00000000..9d925397 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/actions.py @@ -0,0 +1,1554 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, overload + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.volumes import ( + action_list_params, + action_retrieve_params, + action_initiate_by_id_params, + action_initiate_by_name_params, +) +from ....types.gpu_droplets.volumes.action_list_response import ActionListResponse +from ....types.gpu_droplets.volumes.action_retrieve_response import ActionRetrieveResponse +from ....types.gpu_droplets.volumes.action_initiate_by_id_response import ActionInitiateByIDResponse +from ....types.gpu_droplets.volumes.action_initiate_by_name_response import ActionInitiateByNameResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + def retrieve( + self, + action_id: int, + *, + volume_id: str, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a volume action, send a GET request to + `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_retrieve_params.ActionRetrieveParams, + ), + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a volume, send a GET request + to `/v2/volumes/$VOLUME_ID/actions`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. 
The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. 
+ + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate_by_id( + self, + volume_id: str, + *, + size_gigabytes: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. 
However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + size_gigabytes: The new size of the block storage volume in GiB (1024^3). + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + size_gigabytes: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._post( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + body=maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + "size_gigabytes": size_gigabytes, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + ), + cast_to=ActionInitiateByIDResponse, + ) + + @overload + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
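As a review aid, here is a minimal usage sketch of the synchronous action methods above. It assumes the generated client is exported as `GradientAI` from the `gradientai` package and that this resource is mounted at `client.gpu_droplets.volumes.actions`; neither name is confirmed by this diff, and all IDs are placeholders.

```python
# Sketch only: `GradientAI` and the resource path are assumptions, not part of this diff.
from gradientai import GradientAI

client = GradientAI()  # assumed to read the API token from the environment

# Attach a volume to a Droplet by volume ID. `droplet_id` + `type` satisfy the
# first @required_args group; `tags` requires the `tag:create` scope.
client.gpu_droplets.volumes.actions.initiate_by_id(
    volume_id="7724db7c-e098-11e5-b522-000f53304e51",  # placeholder ID
    droplet_id=11612190,                               # placeholder ID
    type="attach",
    region="nyc1",
    tags=["example-tag"],
)

# Resize the same volume. `size_gigabytes` + `type` satisfy the second
# @required_args group; volumes may only grow, up to 16 TiB.
client.gpu_droplets.volumes.actions.initiate_by_id(
    volume_id="7724db7c-e098-11e5-b522-000f53304e51",
    size_gigabytes=200,
    type="resize",
    region="nyc1",
)
```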
+ + @required_args(["droplet_id", "type"]) + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + return self._post( + "/v2/volumes/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/volumes/actions", + body=maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + ), + cast_to=ActionInitiateByNameResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + async def retrieve( + self, + action_id: int, + *, + volume_id: str, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a volume action, send a GET request to + `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_retrieve_params.ActionRetrieveParams, + ), + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a volume, send a GET request + to `/v2/volumes/$VOLUME_ID/actions`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + size_gigabytes: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. 
The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + size_gigabytes: The new size of the block storage volume in GiB (1024^3). + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + size_gigabytes: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._post( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + "size_gigabytes": size_gigabytes, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + ), + cast_to=ActionInitiateByIDResponse, + ) + + @overload + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
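The async overloads mirror the sync ones. A hedged sketch follows, reusing the assumed names from the earlier example plus an assumed `AsyncGradientAI` export. Note that the endpoint's docs call for a `volume_name` body attribute, but the generated signature does not expose it, so the sketch supplies it through `extra_body` (documented above as adding JSON properties to the request).

```python
# Sketch only: `AsyncGradientAI` and the resource path are assumptions.
import asyncio

from gradientai import AsyncGradientAI


async def main() -> None:
    client = AsyncGradientAI()

    # Detach a volume by name; `droplet_id` and `type` are the required arguments.
    await client.gpu_droplets.volumes.actions.initiate_by_name(
        droplet_id=11612190,  # placeholder ID
        type="detach",
        region="nyc1",
        # `volume_name` is not a keyword argument on this method, so pass it
        # as an extra JSON body property.
        extra_body={"volume_name": "example-volume"},
    )


asyncio.run(main())
```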
+ + @required_args(["droplet_id", "type"]) + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + return await self._post( + "/v2/volumes/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/volumes/actions", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + ), + cast_to=ActionInitiateByNameResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + self.initiate_by_id = to_raw_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = to_raw_response_wrapper( + actions.initiate_by_name, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + self.initiate_by_id = async_to_raw_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = async_to_raw_response_wrapper( + actions.initiate_by_name, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + self.initiate_by_id = to_streamed_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = to_streamed_response_wrapper( + actions.initiate_by_name, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) + self.initiate_by_id = async_to_streamed_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = async_to_streamed_response_wrapper( + actions.initiate_by_name, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/gradientai/resources/gpu_droplets/volumes/snapshots.py new file mode 100644 index 00000000..766d9a3a --- 
/dev/null
+++ b/src/gradientai/resources/gpu_droplets/volumes/snapshots.py
@@ -0,0 +1,499 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.gpu_droplets.volumes import snapshot_list_params, snapshot_create_params
+from ....types.gpu_droplets.volumes.snapshot_list_response import SnapshotListResponse
+from ....types.gpu_droplets.volumes.snapshot_create_response import SnapshotCreateResponse
+from ....types.gpu_droplets.volumes.snapshot_retrieve_response import SnapshotRetrieveResponse
+
+__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"]
+
+
+class SnapshotsResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> SnapshotsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return SnapshotsResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return SnapshotsResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        volume_id: str,
+        *,
+        name: str,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotCreateResponse:
+        """
+        To create a snapshot from a volume, send a POST request to
+        `/v2/volumes/$VOLUME_ID/snapshots`.
+
+        Args:
+          name: A human-readable name for the volume snapshot.
+
+          tags: A flat array of tag names as strings to be applied to the resource. Tag names
+              may be for either existing or new tags.
+
+              Requires `tag:create` scope.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._post( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + body=maybe_transform( + { + "name": name, + "tags": tags, + }, + snapshot_create_params.SnapshotCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotCreateResponse, + ) + + def retrieve( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotRetrieveResponse: + """ + To retrieve the details of a snapshot that has been created from a volume, send + a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + return self._get( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotRetrieveResponse, + ) + + def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotListResponse: + """ + To retrieve the snapshots that have been created from a volume, send a GET + request to `/v2/volumes/$VOLUME_ID/snapshots`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + def delete( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a volume snapshot, send a DELETE request to + `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncSnapshotsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSnapshotsResourceWithStreamingResponse(self) + + async def create( + self, + volume_id: str, + *, + name: str, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotCreateResponse:
+        """
+        To create a snapshot from a volume, send a POST request to
+        `/v2/volumes/$VOLUME_ID/snapshots`.
+
+        Args:
+          name: A human-readable name for the volume snapshot.
+
+          tags: A flat array of tag names as strings to be applied to the resource. Tag names
+              may be for either existing or new tags.
+
+              Requires `tag:create` scope.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not volume_id:
+            raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+        return await self._post(
+            f"/v2/volumes/{volume_id}/snapshots"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots",
+            body=await async_maybe_transform(
+                {
+                    "name": name,
+                    "tags": tags,
+                },
+                snapshot_create_params.SnapshotCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SnapshotCreateResponse,
+        )
+
+    async def retrieve(
+        self,
+        snapshot_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotRetrieveResponse:
+        """
+        To retrieve the details of a snapshot that has been created from a volume, send
+        a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not snapshot_id:
+            raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}")
+        return await self._get(
+            f"/v2/volumes/snapshots/{snapshot_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SnapshotRetrieveResponse,
+        )
+
+    async def list(
+        self,
+        volume_id: str,
+        *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SnapshotListResponse:
+        """
+        To retrieve the snapshots that have been created from a volume, send a GET
+        request to `/v2/volumes/$VOLUME_ID/snapshots`.
+
+        Args:
+          page: Which 'page' of paginated results to return.
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + async def delete( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a volume snapshot, send a DELETE request to + `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class SnapshotsResourceWithRawResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = to_raw_response_wrapper( + snapshots.create, + ) + self.retrieve = to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = to_raw_response_wrapper( + snapshots.list, + ) + self.delete = to_raw_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithRawResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = async_to_raw_response_wrapper( + snapshots.create, + ) + self.retrieve = async_to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_raw_response_wrapper( + snapshots.list, + ) + self.delete = async_to_raw_response_wrapper( + snapshots.delete, + ) + + +class SnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = to_streamed_response_wrapper( + snapshots.create, + ) + self.retrieve = to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = to_streamed_response_wrapper( 
+ snapshots.list, + ) + self.delete = to_streamed_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = async_to_streamed_response_wrapper( + snapshots.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = async_to_streamed_response_wrapper( + snapshots.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/volumes.py b/src/gradientai/resources/gpu_droplets/volumes/volumes.py new file mode 100644 index 00000000..efd1d4ae --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/volumes.py @@ -0,0 +1,1144 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, overload + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import volume_list_params, volume_create_params, volume_delete_by_name_params +from ....types.gpu_droplets.volume_list_response import VolumeListResponse +from ....types.gpu_droplets.volume_create_response import VolumeCreateResponse +from ....types.gpu_droplets.volume_retrieve_response import VolumeRetrieveResponse + +__all__ = ["VolumesResource", "AsyncVolumesResource"] + + +class VolumesResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def snapshots(self) -> SnapshotsResource: + return SnapshotsResource(self._client) + + @cached_property + def with_raw_response(self) -> VolumesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return VolumesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VolumesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return VolumesResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
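For orientation, a hedged sketch of the `create` call this overload describes, with the same client-name assumption as the earlier examples; all values are illustrative.

```python
# Sketch only: client name and resource path are assumptions.
from gradientai import GradientAI

client = GradientAI()

volume = client.gpu_droplets.volumes.create(
    name="example-volume",         # lowercase letters, numbers, and "-"; must begin with a letter
    region="nyc1",
    size_gigabytes=100,            # size in GiB (1024^3)
    filesystem_type="ext4",        # pre-format so supported Droplets auto-mount on attach
    filesystem_label="example",    # ext4 labels: up to 16 chars; xfs: up to 12
    description="Example block storage volume",
    tags=["environment:staging"],  # requires the `tag:create` scope
)
```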
+ + @overload + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
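As a reading aid for this generated resource, a minimal usage sketch of the `create` method defined above follows. It is illustrative only: the `GradientAI` client class, environment-based authentication, and the `client.gpu_droplets.volumes` accessor are assumptions inferred from this diff's module layout, and all volume values are made up.

    # Hypothetical sketch, not part of the patch. The client name and accessor
    # path are assumed from src/gradientai/resources/gpu_droplets/volumes/.
    from gradientai import GradientAI

    client = GradientAI()  # assumed to read the API token from the environment

    # Create a 100 GiB volume in nyc1, pre-formatted as ext4 so that supported
    # Droplets auto-mount it on attach, as the docstring above describes.
    volume = client.gpu_droplets.volumes.create(
        name="example-volume",
        region="nyc1",
        size_gigabytes=100,
        filesystem_type="ext4",
    )

Since the API allows only one volume per region with a given name, a follow-up `list(name="example-volume", region="nyc1")` resolves to at most one volume, and `.with_raw_response` can wrap the same call when the raw HTTP response is needed.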
+ + @required_args(["name", "region", "size_gigabytes"]) + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + return self._post( + "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", + body=maybe_transform( + { + "name": name, + "region": region, + "size_gigabytes": size_gigabytes, + "description": description, + "filesystem_label": filesystem_label, + "filesystem_type": filesystem_type, + "snapshot_id": snapshot_id, + "tags": tags, + }, + volume_create_params.VolumeCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeCreateResponse, + ) + + def retrieve( + self, + volume_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeRetrieveResponse: + """ + To show information about a block storage volume, send a GET request to + `/v2/volumes/$VOLUME_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeRetrieveResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VolumeListResponse:
+        """
+        To list all of the block storage volumes available on your account, send a GET
+        request to `/v2/volumes`.
+
+        ## Filtering Results
+
+        ### By Region
+
+        The `region` may be provided as a query parameter in order to restrict results to
+        volumes available in a specific region. For example: `/v2/volumes?region=nyc1`
+
+        ### By Name
+
+        It is also possible to list volumes on your account that match a specified name.
+        To do so, send a GET request with the volume's name as a query parameter to
+        `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+        region with the same name.
+
+        ### By Name and Region
+
+        It is also possible to retrieve information about a block storage volume by
+        name. To do so, send a GET request with the volume's name and the region slug
+        for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+        Args:
+          name: The block storage volume's name.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "name": name,
+                        "page": page,
+                        "per_page": per_page,
+                        "region": region,
+                    },
+                    volume_list_params.VolumeListParams,
+                ),
+            ),
+            cast_to=VolumeListResponse,
+        )
+
+    def delete(
+        self,
+        volume_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To delete a block storage volume, destroying all data and removing it from your
+        account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body
+        will be sent back, but the response code will indicate success. Specifically,
+        the response code will be a 204, which means that the action was successful with
+        no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not volume_id:
+            raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/v2/volumes/{volume_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def delete_by_name(
+        self,
+        *,
+        name: str | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Block storage volumes may also be deleted by name by sending a DELETE request
+        with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+        response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          name: The block storage volume's name.
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "name": name,
+                        "region": region,
+                    },
+                    volume_delete_by_name_params.VolumeDeleteByNameParams,
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncVolumesResource(AsyncAPIResource):
+    @cached_property
+    def actions(self) -> AsyncActionsResource:
+        return AsyncActionsResource(self._client)
+
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResource:
+        return AsyncSnapshotsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncVolumesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncVolumesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVolumesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncVolumesResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
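The async resource mirrors the sync surface method for method. Below is a similarly hedged sketch of the name-plus-region deletion implemented by `delete_by_name` later in this class, again assuming an `AsyncGradientAI` export by analogy with the sync client:

    # Hypothetical sketch, not part of the patch; AsyncGradientAI is an
    # assumed name mirroring the sync client.
    import asyncio

    from gradientai import AsyncGradientAI

    async def main() -> None:
        client = AsyncGradientAI()
        # The API responds 204 with no body on success, so this returns None.
        await client.gpu_droplets.volumes.delete_by_name(
            name="example-volume",
            region="nyc1",
        )

    asyncio.run(main())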
+ + @required_args(["name", "region", "size_gigabytes"]) + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + return await self._post( + "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", + body=await async_maybe_transform( + { + "name": name, + "region": region, + "size_gigabytes": size_gigabytes, + "description": description, + "filesystem_label": filesystem_label, + "filesystem_type": filesystem_type, + "snapshot_id": snapshot_id, + "tags": tags, + }, + volume_create_params.VolumeCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeCreateResponse, + ) + + async def retrieve( + self, + volume_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeRetrieveResponse: + """ + To show information about a block storage volume, send a GET request to + `/v2/volumes/$VOLUME_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeRetrieveResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> VolumeListResponse:
+        """
+        To list all of the block storage volumes available on your account, send a GET
+        request to `/v2/volumes`.
+
+        ## Filtering Results
+
+        ### By Region
+
+        The `region` may be provided as a query parameter in order to restrict results to
+        volumes available in a specific region. For example: `/v2/volumes?region=nyc1`
+
+        ### By Name
+
+        It is also possible to list volumes on your account that match a specified name.
+        To do so, send a GET request with the volume's name as a query parameter to
+        `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+        region with the same name.
+
+        ### By Name and Region
+
+        It is also possible to retrieve information about a block storage volume by
+        name. To do so, send a GET request with the volume's name and the region slug
+        for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+        Args:
+          name: The block storage volume's name.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "name": name,
+                        "page": page,
+                        "per_page": per_page,
+                        "region": region,
+                    },
+                    volume_list_params.VolumeListParams,
+                ),
+            ),
+            cast_to=VolumeListResponse,
+        )
+
+    async def delete(
+        self,
+        volume_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To delete a block storage volume, destroying all data and removing it from your
+        account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body
+        will be sent back, but the response code will indicate success. Specifically,
+        the response code will be a 204, which means that the action was successful with
+        no returned body data.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not volume_id:
+            raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/volumes/{volume_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/volumes/{volume_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def delete_by_name(
+        self,
+        *,
+        name: str | NotGiven = NOT_GIVEN,
+        region: Literal[
+            "ams1",
+            "ams2",
+            "ams3",
+            "blr1",
+            "fra1",
+            "lon1",
+            "nyc1",
+            "nyc2",
+            "nyc3",
+            "sfo1",
+            "sfo2",
+            "sfo3",
+            "sgp1",
+            "tor1",
+            "syd1",
+        ]
+        | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Block storage volumes may also be deleted by name by sending a DELETE request
+        with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+        response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          name: The block storage volume's name.
+
+          region: The slug identifier for the region where the resource is available.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "name": name, + "region": region, + }, + volume_delete_by_name_params.VolumeDeleteByNameParams, + ), + ), + cast_to=NoneType, + ) + + +class VolumesResourceWithRawResponse: + def __init__(self, volumes: VolumesResource) -> None: + self._volumes = volumes + + self.create = to_raw_response_wrapper( + volumes.create, + ) + self.retrieve = to_raw_response_wrapper( + volumes.retrieve, + ) + self.list = to_raw_response_wrapper( + volumes.list, + ) + self.delete = to_raw_response_wrapper( + volumes.delete, + ) + self.delete_by_name = to_raw_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithRawResponse: + return SnapshotsResourceWithRawResponse(self._volumes.snapshots) + + +class AsyncVolumesResourceWithRawResponse: + def __init__(self, volumes: AsyncVolumesResource) -> None: + self._volumes = volumes + + self.create = async_to_raw_response_wrapper( + volumes.create, + ) + self.retrieve = async_to_raw_response_wrapper( + volumes.retrieve, + ) + self.list = async_to_raw_response_wrapper( + volumes.list, + ) + self.delete = async_to_raw_response_wrapper( + volumes.delete, + ) + self.delete_by_name = async_to_raw_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse: + return AsyncSnapshotsResourceWithRawResponse(self._volumes.snapshots) + + +class VolumesResourceWithStreamingResponse: + def __init__(self, volumes: VolumesResource) -> None: + self._volumes = volumes + + self.create = to_streamed_response_wrapper( + volumes.create, + ) + self.retrieve = to_streamed_response_wrapper( + volumes.retrieve, + ) + self.list = to_streamed_response_wrapper( + volumes.list, + ) + self.delete = to_streamed_response_wrapper( + volumes.delete, + ) + self.delete_by_name = to_streamed_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithStreamingResponse: + return SnapshotsResourceWithStreamingResponse(self._volumes.snapshots) + + +class AsyncVolumesResourceWithStreamingResponse: + def __init__(self, volumes: AsyncVolumesResource) -> None: + self._volumes = volumes + + self.create = async_to_streamed_response_wrapper( + volumes.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + volumes.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + volumes.list, + ) + self.delete = async_to_streamed_response_wrapper( + volumes.delete, + ) + 
self.delete_by_name = async_to_streamed_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: + return AsyncSnapshotsResourceWithStreamingResponse(self._volumes.snapshots) diff --git a/src/gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py index 6759d09c..238ef6f6 100644 --- a/src/gradientai/resources/inference/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -60,6 +60,8 @@ def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. Args: + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -97,6 +99,10 @@ def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -140,9 +146,9 @@ def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -280,6 +286,8 @@ async def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. Args: + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -317,6 +325,10 @@ async def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -360,9 +372,9 @@ async def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index e05696b9..8357dfda 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -69,6 +69,14 @@ def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: + aws_data_source: AWS S3 Data Source + + body_knowledge_base_uuid: Knowledge base id + + spaces_data_source: Spaces Bucket Data Source + + web_crawler_data_source: WebCrawlerDataSource + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -118,9 +126,9 @@ def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -236,6 +244,14 @@ async def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: + aws_data_source: AWS S3 Data Source + + body_knowledge_base_uuid: Knowledge base id + + spaces_data_source: Spaces Bucket Data Source + + web_crawler_data_source: WebCrawlerDataSource + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -285,9 +301,9 @@ async def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
         Args:
-          page: page number.
+          page: Page number.
 
-          per_page: items per page.
+          per_page: Items per page.
 
           extra_headers: Send extra headers
diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py
index 39151e41..891acd0b 100644
--- a/src/gradientai/resources/knowledge_bases/indexing_jobs.py
+++ b/src/gradientai/resources/knowledge_bases/indexing_jobs.py
@@ -68,6 +68,11 @@ def create(
         `/v2/gen-ai/indexing_jobs`.
 
         Args:
+          data_source_uuids: List of data source ids to index; if none are provided, all data sources will be
+              indexed
+
+          knowledge_base_uuid: Knowledge base id
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -146,9 +151,9 @@ def list(
         `/v2/gen-ai/indexing_jobs`.
 
         Args:
-          page: page number.
+          page: Page number.
 
-          per_page: items per page.
+          per_page: Items per page.
 
           extra_headers: Send extra headers
@@ -294,6 +299,11 @@ async def create(
         `/v2/gen-ai/indexing_jobs`.
 
         Args:
+          data_source_uuids: List of data source ids to index; if none are provided, all data sources will be
+              indexed
+
+          knowledge_base_uuid: Knowledge base id
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -372,9 +382,9 @@ async def list(
         `/v2/gen-ai/indexing_jobs`.
 
         Args:
-          page: page number.
+          page: Page number.
 
-          per_page: items per page.
+          per_page: Items per page.
 
           extra_headers: Send extra headers
diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
index 28acdd7f..c181295c 100644
--- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py
+++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py
@@ -112,6 +112,8 @@ def create(
 
           tags: Tags to organize your knowledge base.
 
+          vpc_uuid: The VPC to deploy the knowledge base database in
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -201,12 +203,18 @@ def update(
         `/v2/gen-ai/knowledge_bases/{uuid}`.
 
         Args:
-          database_id: the id of the DigitalOcean database this knowledge base will use, optiona.
+          database_id: The id of the DigitalOcean database this knowledge base will use, optional.
 
           embedding_model_uuid: Identifier for the foundation model.
 
+          name: Knowledge base name
+
+          project_id: The id of the DigitalOcean project this knowledge base will belong to
+
           tags: Tags to organize your knowledge base.
 
+          body_uuid: Knowledge base id
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -254,9 +262,9 @@ def list(
         To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`.
 
         Args:
-          page: page number.
+          page: Page number.
 
-          per_page: items per page.
+          per_page: Items per page.
 
           extra_headers: Send extra headers
@@ -392,6 +400,8 @@ async def create(
 
           tags: Tags to organize your knowledge base.
 
+          vpc_uuid: The VPC to deploy the knowledge base database in
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -481,12 +491,18 @@ async def update(
         `/v2/gen-ai/knowledge_bases/{uuid}`.
 
         Args:
-          database_id: the id of the DigitalOcean database this knowledge base will use, optiona.
+          database_id: The id of the DigitalOcean database this knowledge base will use, optional.
 
           embedding_model_uuid: Identifier for the foundation model.
+ name: Knowledge base name + + project_id: The id of the DigitalOcean project this knowledge base will belong to + tags: Tags to organize your knowledge base. + body_uuid: Knowledge base id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -534,9 +550,9 @@ async def list( To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/models/models.py b/src/gradientai/resources/models/models.py index 3c524767..41f2eabd 100644 --- a/src/gradientai/resources/models/models.py +++ b/src/gradientai/resources/models/models.py @@ -2,14 +2,9 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal - import httpx -from ...types import model_list_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -28,6 +23,7 @@ AsyncProvidersResourceWithStreamingResponse, ) from ...types.model_list_response import ModelListResponse +from ...types.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -56,52 +52,22 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def list( + def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> ModelRetrieveResponse: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -110,24 +76,36 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -157,52 +135,22 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def list( + async def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> ModelRetrieveResponse: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. 
Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -211,24 +159,36 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. 
+ """ + return await self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -238,6 +198,9 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) self.list = to_raw_response_wrapper( models.list, ) @@ -251,6 +214,9 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -264,6 +230,9 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) self.list = to_streamed_response_wrapper( models.list, ) @@ -277,6 +246,9 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_streamed_response_wrapper( + models.retrieve, + ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/gradientai/resources/models/providers/anthropic.py index 26c9b977..e570be51 100644 --- a/src/gradientai/resources/models/providers/anthropic.py +++ b/src/gradientai/resources/models/providers/anthropic.py @@ -68,6 +68,10 @@ def create( `/v2/gen-ai/anthropic/keys`. Args: + api_key: Anthropic API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -148,6 +152,12 @@ def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -193,9 +203,9 @@ def list( `/v2/gen-ai/anthropic/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -278,9 +288,9 @@ def list_agents( List Agents by Anthropic Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -350,6 +360,10 @@ async def create( `/v2/gen-ai/anthropic/keys`. Args: + api_key: Anthropic API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -430,6 +444,12 @@ async def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -475,9 +495,9 @@ async def list( `/v2/gen-ai/anthropic/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. 
extra_headers: Send extra headers @@ -560,9 +580,9 @@ async def list_agents( List Agents by Anthropic Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/models/providers/openai.py b/src/gradientai/resources/models/providers/openai.py index d337cd9b..ccd594b8 100644 --- a/src/gradientai/resources/models/providers/openai.py +++ b/src/gradientai/resources/models/providers/openai.py @@ -67,6 +67,10 @@ def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: + api_key: OpenAI API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -147,6 +151,12 @@ def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -191,9 +201,9 @@ def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -276,9 +286,9 @@ def retrieve_agents( List Agents by OpenAI Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -347,6 +357,10 @@ async def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: + api_key: OpenAI API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -427,6 +441,12 @@ async def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -471,9 +491,9 @@ async def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -556,9 +576,9 @@ async def retrieve_agents( List Agents by OpenAI Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index 4c50d9e6..e953e4f3 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -44,8 +44,8 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: def list( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -54,12 +54,15 @@ def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + To list all of the regions that are available, send a GET request to + `/v2/regions`. 
The response will be a JSON object with a key called `regions`. + The value of this will be an array of `region` objects, each of which will + contain the standard region attributes. Args: - serves_batch: include datacenters that are capable of running batch jobs. + page: Which 'page' of paginated results to return. - serves_inference: include datacenters that serve inference. + per_page: Number of items returned per page extra_headers: Send extra headers @@ -70,9 +73,7 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", + "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -80,8 +81,8 @@ def list( timeout=timeout, query=maybe_transform( { - "serves_batch": serves_batch, - "serves_inference": serves_inference, + "page": page, + "per_page": per_page, }, region_list_params.RegionListParams, ), @@ -113,8 +114,8 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: async def list( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -123,12 +124,15 @@ async def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + To list all of the regions that are available, send a GET request to + `/v2/regions`. The response will be a JSON object with a key called `regions`. + The value of this will be an array of `region` objects, each of which will + contain the standard region attributes. Args: - serves_batch: include datacenters that are capable of running batch jobs. + page: Which 'page' of paginated results to return. - serves_inference: include datacenters that serve inference. 
+ per_page: Number of items returned per page extra_headers: Send extra headers @@ -139,9 +143,7 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", + "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -149,8 +151,8 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "serves_batch": serves_batch, - "serves_inference": serves_inference, + "page": page, + "per_page": per_page, }, region_list_params.RegionListParams, ), diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index c8144381..20747fb3 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,9 +3,32 @@ from __future__ import annotations from .shared import ( + Size as Size, + Image as Image, + Action as Action, + Kernel as Kernel, + Region as Region, APIMeta as APIMeta, + Droplet as Droplet, + GPUInfo as GPUInfo, APILinks as APILinks, + DiskInfo as DiskInfo, + NetworkV4 as NetworkV4, + NetworkV6 as NetworkV6, + PageLinks as PageLinks, + Snapshots as Snapshots, + ActionLink as ActionLink, + VpcPeering as VpcPeering, + ForwardLinks as ForwardLinks, + Subscription as Subscription, + BackwardLinks as BackwardLinks, + MetaProperties as MetaProperties, + CompletionUsage as CompletionUsage, + GarbageCollection as GarbageCollection, + FirewallRuleTarget as FirewallRuleTarget, ChatCompletionChunk as ChatCompletionChunk, + SubscriptionTierBase as SubscriptionTierBase, + DropletNextBackupWindow as DropletNextBackupWindow, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, ) from .api_agent import APIAgent as APIAgent @@ -15,7 +38,6 @@ from .api_agent_model import APIAgentModel as APIAgentModel from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion -from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams @@ -27,18 +49,34 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo +from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams +from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility +from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams +from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import 
APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .droplet_backup_policy_param import DropletBackupPolicyParam as DropletBackupPolicyParam +from .gpu_droplet_create_response import GPUDropletCreateResponse as GPUDropletCreateResponse from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .gpu_droplet_retrieve_response import GPUDropletRetrieveResponse as GPUDropletRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse +from .gpu_droplet_list_kernels_params import GPUDropletListKernelsParams as GPUDropletListKernelsParams +from .gpu_droplet_delete_by_tag_params import GPUDropletDeleteByTagParams as GPUDropletDeleteByTagParams from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse +from .gpu_droplet_list_firewalls_params import GPUDropletListFirewallsParams as GPUDropletListFirewallsParams +from .gpu_droplet_list_kernels_response import GPUDropletListKernelsResponse as GPUDropletListKernelsResponse +from .gpu_droplet_list_snapshots_params import GPUDropletListSnapshotsParams as GPUDropletListSnapshotsParams +from .gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse +from .gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse as GPUDropletListNeighborsResponse +from .gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse as GPUDropletListSnapshotsResponse diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py index 58b99df7..68ebd227 100644 --- a/src/gradientai/types/agent_create_params.py +++ b/src/gradientai/types/agent_create_params.py @@ -12,8 +12,10 @@ class AgentCreateParams(TypedDict, total=False): anthropic_key_uuid: str + """Optional Anthropic API key ID to use with Anthropic models""" description: str + """A text description of the agent, not used in inference""" instruction: str """Agent instruction. 
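The annotations added to `AgentCreateParams` in the hunks around this point document every create-time field. A minimal usage sketch may help connect them to the generated client; the `GradientAI` client class and `agents.create` method are assumptions inferred from the package and resource layout (neither signature appears in this patch), and all UUIDs are placeholders:

    # Hedged sketch only: client class name, auth handling, and method path
    # are assumptions, not confirmed by this patch.
    from gradientai import GradientAI

    client = GradientAI()  # assumed to read the API token from the environment

    response = client.agents.create(
        name="support-bot",  # "Agent name"
        instruction="Answer billing questions politely.",
        model_uuid="00000000-0000-0000-0000-000000000000",  # foundation model id
        project_id="11111111-1111-1111-1111-111111111111",
        region="tor1",  # DigitalOcean region to deploy the agent in
        knowledge_base_uuid=["22222222-2222-2222-2222-222222222222"],
        tags=["billing", "support"],
    )
    if response.agent:
        print(response.agent.uuid)

The response wrapper mirrors `AgentCreateResponse`, whose only documented field is the optional `agent`.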
@@ -24,16 +26,22 @@ class AgentCreateParams(TypedDict, total=False): """ knowledge_base_uuid: List[str] + """Ids of the knowledge base(s) to attach to the agent""" model_uuid: str """Identifier for the foundation model.""" name: str + """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + """Optional OpenAI API key ID to use with OpenAI models""" project_id: str + """The id of the DigitalOcean project this agent will belong to""" region: str + """The DigitalOcean region to deploy your agent in""" tags: List[str] + """Agent tag to organize related resources""" diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py index 48545fe9..edd48b7d 100644 --- a/src/gradientai/types/agent_create_response.py +++ b/src/gradientai/types/agent_create_response.py @@ -11,6 +11,7 @@ class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py index eb1d440d..8c2b2e14 100644 --- a/src/gradientai/types/agent_delete_response.py +++ b/src/gradientai/types/agent_delete_response.py @@ -11,6 +11,7 @@ class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py index e13a10c9..b56d0395 100644 --- a/src/gradientai/types/agent_list_params.py +++ b/src/gradientai/types/agent_list_params.py @@ -9,10 +9,10 @@ class AgentListParams(TypedDict, total=False): only_deployed: bool - """only list agents that are deployed.""" + """Only list agents that are deployed.""" page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 397d9fd2..7a64c66e 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -29,6 +29,7 @@ class AgentChatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None + """Name of chatbot""" primary_color: Optional[str] = None @@ -39,12 +40,15 @@ class AgentChatbot(BaseModel): class AgentChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None + """Agent chatbot identifier""" class AgentDeployment(BaseModel): created_at: Optional[datetime] = None + """Creation date / time""" name: Optional[str] = None + """Name""" status: Optional[ Literal[ @@ -61,70 +65,112 @@ class AgentDeployment(BaseModel): ] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your deployed agent here""" uuid: Optional[str] = None + """Unique id""" visibility: Optional[APIDeploymentVisibility] = None + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None + """Priority of the guardrail""" uuid: Optional[str] = None + """Uuid of the guardrail""" class AgentTemplate(BaseModel): 
created_at: Optional[datetime] = None + """The agent template's creation date""" description: Optional[str] = None + """Deprecated - Use summary instead""" guardrails: Optional[List[AgentTemplateGuardrail]] = None + """List of guardrails associated with the agent template""" instruction: Optional[str] = None + """Instructions for the agent template""" k: Optional[int] = None + """The 'k' value for the agent template""" knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None + """The long description of the agent template""" max_tokens: Optional[int] = None + """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Name of the agent template""" short_description: Optional[str] = None + """The short description of the agent template""" summary: Optional[str] = None + """The summary of the agent template""" tags: Optional[List[str]] = None + """List of tags associated with the agent template""" temperature: Optional[float] = None + """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + """ + - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template + - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template + """ top_p: Optional[float] = None + """The top_p setting for the agent template""" updated_at: Optional[datetime] = None + """The agent template's last updated date""" uuid: Optional[str] = None + """Unique id""" class Agent(BaseModel): chatbot: Optional[AgentChatbot] = None + """A Chatbot""" chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None + """Chatbot identifiers""" created_at: Optional[datetime] = None + """Creation date / time""" deployment: Optional[AgentDeployment] = None + """Description of deployment""" description: Optional[str] = None + """Description of agent""" if_case: Optional[str] = None + """Instructions to the agent on how to use the route""" instruction: Optional[str] = None """Agent instruction. 
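With the `Agent`, `AgentDeployment`, and `AgentTemplate` fields now annotated, a short read-side sketch follows; the `page`/`per_page`/`only_deployed` arguments come from `AgentListParams` earlier in this patch, while the client class and method name are assumptions:

    # Hedged sketch only: client class and method names are assumptions.
    from gradientai import GradientAI

    client = GradientAI()

    resp = client.agents.list(page=1, per_page=25, only_deployed=True)
    for agent in resp.agents or []:
        deployment = agent.deployment
        # Fields follow the annotated models: status, url, visibility.
        print(
            agent.name,
            deployment.status if deployment else None,
            deployment.url if deployment else None,
        )

`resp.links` and `resp.meta` carry the pagination metadata documented on `AgentListResponse` just below.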
@@ -135,6 +181,7 @@ class Agent(BaseModel): """ k: Optional[int] = None + """How many results should be considered from an attached knowledge base""" max_tokens: Optional[int] = None """ @@ -144,26 +191,43 @@ class Agent(BaseModel): """ model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Agent name""" project_id: Optional[str] = None + """The DigitalOcean project ID associated with the agent""" provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" region: Optional[str] = None + """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ route_created_at: Optional[datetime] = None + """Creation of route date / time""" route_created_by: Optional[str] = None + """Id of user that created the route""" route_name: Optional[str] = None + """Route name""" route_uuid: Optional[str] = None + """Route uuid""" tags: Optional[List[str]] = None + """A set of arbitrary tags to organize your agent""" temperature: Optional[float] = None """Controls the model’s creativity, specified as a number between 0 and 1. @@ -173,6 +237,7 @@ class Agent(BaseModel): """ template: Optional[AgentTemplate] = None + """Represents an AgentTemplate entity""" top_p: Optional[float] = None """ @@ -182,17 +247,27 @@ class Agent(BaseModel): """ updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your agent under this url""" user_id: Optional[str] = None + """Id of user that created the agent""" uuid: Optional[str] = None + """Unique agent id""" + + version_hash: Optional[str] = None + """The latest version of the agent""" class AgentListResponse(BaseModel): agents: Optional[List[Agent]] = None + """Agents""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py index 2eed88af..2836558b 100644 --- a/src/gradientai/types/agent_retrieve_response.py +++ b/src/gradientai/types/agent_retrieve_response.py @@ -11,6 +11,7 @@ class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py index 85f9a9c2..5d2b5597 100644 --- a/src/gradientai/types/agent_update_params.py +++ b/src/gradientai/types/agent_update_params.py @@ -13,8 +13,13 @@ class AgentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str + """Optional anthropic key uuid for use with anthropic models""" + + conversation_logs_enabled: bool + """Optional update of conversation logs enabled""" description: str + """Agent description""" instruction: str """Agent instruction.
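The update path mirrors the create path. A sketch assuming an `agents.update` method, with the path parameter name guessed from the `body_uuid` alias in the hunks around this point (the exact generated signature is not shown in this patch):

    # Hedged sketch only: method name and path parameter name are assumptions.
    from gradientai import GradientAI

    client = GradientAI()

    updated = client.agents.update(
        path_uuid="33333333-3333-3333-3333-333333333333",  # assumed parameter name
        name="support-bot-v2",
        temperature=0.3,  # between 0 and 1; lower values are more deterministic
        retrieval_method="RETRIEVAL_METHOD_REWRITE",
        tags=["billing", "support", "v2"],
    )
    if updated.agent:
        print(updated.agent.uuid)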
@@ -25,6 +30,7 @@ class AgentUpdateParams(TypedDict, total=False): """ k: int + """How many results should be considered from an attached knowledge base""" max_tokens: int """ @@ -37,16 +43,27 @@ class AgentUpdateParams(TypedDict, total=False): """Identifier for the foundation model.""" name: str + """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + """Optional OpenAI key uuid for use with OpenAI models""" project_id: str + """The id of the DigitalOcean project this agent will belong to""" provide_citations: bool retrieval_method: APIRetrievalMethod + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ tags: List[str] + """A set of arbitrary tags to organize your agent""" temperature: float """Controls the model’s creativity, specified as a number between 0 and 1. @@ -63,3 +80,4 @@ class AgentUpdateParams(TypedDict, total=False): """ body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Unique agent id""" diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py index 2948aa1c..1976089b 100644 --- a/src/gradientai/types/agent_update_response.py +++ b/src/gradientai/types/agent_update_response.py @@ -11,6 +11,7 @@ class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py index a0cdc0b9..3f16fdc2 100644 --- a/src/gradientai/types/agent_update_status_params.py +++ b/src/gradientai/types/agent_update_status_params.py @@ -12,5 +12,16 @@ class AgentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Unique id""" visibility: APIDeploymentVisibility + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py index b200f99d..84457d85 100644 --- a/src/gradientai/types/agent_update_status_response.py +++ b/src/gradientai/types/agent_update_status_response.py @@ -11,6 +11,7 @@ class AgentUpdateStatusResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 9c6508f6..39b82ebc 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -40,6 +40,7 @@ from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse +from .evaluation_run_list_results_params import
EvaluationRunListResultsParams as EvaluationRunListResultsParams from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams @@ -47,9 +48,15 @@ from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse +from .evaluation_metric_list_regions_params import ( + EvaluationMetricListRegionsParams as EvaluationMetricListRegionsParams, +) from .evaluation_test_case_retrieve_response import ( EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, ) +from .evaluation_metric_list_regions_response import ( + EvaluationMetricListRegionsResponse as EvaluationMetricListRegionsResponse, +) from .evaluation_run_retrieve_results_response import ( EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse, ) diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py index 1aa85306..2d3b4194 100644 --- a/src/gradientai/types/agents/api_evaluation_metric.py +++ b/src/gradientai/types/agents/api_evaluation_metric.py @@ -11,6 +11,9 @@ class APIEvaluationMetric(BaseModel): description: Optional[str] = None + inverted: Optional[bool] = None + """If true, the metric is inverted, meaning that a lower value is better.""" + metric_name: Optional[str] = None metric_type: Optional[ @@ -20,5 +23,16 @@ class APIEvaluationMetric(BaseModel): metric_uuid: Optional[str] = None metric_value_type: Optional[ - Literal["METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING"] + Literal[ + "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", + "METRIC_VALUE_TYPE_PERCENTAGE", + ] ] = None + + range_max: Optional[float] = None + """The maximum value for the metric.""" + + range_min: Optional[float] = None + """The minimum value for the metric.""" diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py index 35146c00..3d6ea84f 100644 --- a/src/gradientai/types/agents/api_evaluation_metric_result.py +++ b/src/gradientai/types/agents/api_evaluation_metric_result.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Optional +from typing_extensions import Literal from ..._models import BaseModel @@ -8,10 +9,26 @@ class APIEvaluationMetricResult(BaseModel): + error_description: Optional[str] = None + """Error description if the metric could not be calculated.""" + metric_name: Optional[str] = None + """Metric name""" + + metric_value_type: Optional[ + Literal[ + "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", + "METRIC_VALUE_TYPE_PERCENTAGE", + ] + ] = None number_value: Optional[float] = None """The value of the metric as a number.""" + reasoning: Optional[str] = None + """Reasoning of the metric result.""" + string_value: Optional[str] = None """The value of the metric as a string.""" diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py index 750e62fb..7471e9ae 100644 --- a/src/gradientai/types/agents/api_evaluation_prompt.py +++ b/src/gradientai/types/agents/api_evaluation_prompt.py @@ -31,12 +31,19 @@ class APIEvaluationPrompt(BaseModel): input: Optional[str] = None + input_tokens: Optional[str] = None + """The number of input tokens used in the prompt.""" + output: Optional[str] = None + output_tokens: Optional[str] = None + """The number of output tokens used in the prompt.""" + prompt_chunks: Optional[List[PromptChunk]] = None """The list of prompt chunks.""" prompt_id: Optional[int] = None + """Prompt ID""" prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None """The metric results for the prompt.""" diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/gradientai/types/agents/api_evaluation_run.py index b879f756..5a758898 100644 --- a/src/gradientai/types/agents/api_evaluation_run.py +++ b/src/gradientai/types/agents/api_evaluation_run.py @@ -12,31 +12,42 @@ class APIEvaluationRun(BaseModel): agent_deleted: Optional[bool] = None + """Whether agent is deleted""" agent_name: Optional[str] = None + """Agent name""" agent_uuid: Optional[str] = None """Agent UUID.""" agent_version_hash: Optional[str] = None + """Version hash""" agent_workspace_uuid: Optional[str] = None + """Agent workspace uuid""" created_by_user_email: Optional[str] = None created_by_user_id: Optional[str] = None error_description: Optional[str] = None + """The error description""" evaluation_run_uuid: Optional[str] = None """Evaluation run UUID.""" + evaluation_test_case_workspace_uuid: Optional[str] = None + """Evaluation test case workspace uuid""" + finished_at: Optional[datetime] = None """Run end time.""" pass_status: Optional[bool] = None """The pass status of the evaluation run based on the star metric.""" + queued_at: Optional[datetime] = None + """Run queued time.""" + run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None run_name: Optional[str] = None @@ -60,6 +71,13 @@ class APIEvaluationRun(BaseModel): "EVALUATION_RUN_FAILED", ] ] = None + """Evaluation Run Statuses""" + + test_case_description: Optional[str] = None + """Test case description.""" + + test_case_name: Optional[str] = None + """Test case name.""" test_case_uuid: Optional[str] = None """Test-case UUID.""" diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py index 09ce5e48..dc4c55f0 100644 --- a/src/gradientai/types/agents/api_evaluation_test_case.py +++ b/src/gradientai/types/agents/api_evaluation_test_case.py @@ -7,7 +7,27 @@ from .api_star_metric import APIStarMetric from 
.api_evaluation_metric import APIEvaluationMetric -__all__ = ["APIEvaluationTestCase"] +__all__ = ["APIEvaluationTestCase", "Dataset"] + + +class Dataset(BaseModel): + created_at: Optional[datetime] = None + """Time created at.""" + + dataset_name: Optional[str] = None + """Name of the dataset.""" + + dataset_uuid: Optional[str] = None + """UUID of the dataset.""" + + file_size: Optional[str] = None + """The size of the dataset uploaded file in bytes.""" + + has_ground_truth: Optional[bool] = None + """Does the dataset have a ground truth column?""" + + row_count: Optional[int] = None + """Number of rows in the dataset.""" class APIEvaluationTestCase(BaseModel): @@ -19,6 +39,8 @@ class APIEvaluationTestCase(BaseModel): created_by_user_id: Optional[str] = None + dataset: Optional[Dataset] = None + dataset_name: Optional[str] = None dataset_uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py index c3fc44cd..184c330c 100644 --- a/src/gradientai/types/agents/api_key_create_params.py +++ b/src/gradientai/types/agents/api_key_create_params.py @@ -11,5 +11,7 @@ class APIKeyCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" name: str + """A human friendly name to identify the key""" diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py index 09689fe7..ed8906c8 100644 --- a/src/gradientai/types/agents/api_key_create_response.py +++ b/src/gradientai/types/agents/api_key_create_response.py @@ -10,3 +10,4 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py index 02b03f61..1f38c52e 100644 --- a/src/gradientai/types/agents/api_key_delete_response.py +++ b/src/gradientai/types/agents/api_key_delete_response.py @@ -10,3 +10,4 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py index 11da9398..1f8f96b7 100644 --- a/src/gradientai/types/agents/api_key_list_params.py +++ b/src/gradientai/types/agents/api_key_list_params.py @@ -9,7 +9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py index aedb88ca..0040e91c 100644 --- a/src/gradientai/types/agents/api_key_list_response.py +++ b/src/gradientai/types/agents/api_key_list_response.py @@ -12,7 +12,10 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py index ea2f761e..400140fb 100644 --- a/src/gradientai/types/agents/api_key_regenerate_response.py +++ b/src/gradientai/types/agents/api_key_regenerate_response.py @@ -10,3 +10,4 @@ class APIKeyRegenerateResponse(BaseModel): api_key_info: 
Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py index b49ebb38..ba997a2f 100644 --- a/src/gradientai/types/agents/api_key_update_params.py +++ b/src/gradientai/types/agents/api_key_update_params.py @@ -13,7 +13,10 @@ class APIKeyUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name""" diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py index 87442329..56154b16 100644 --- a/src/gradientai/types/agents/api_key_update_response.py +++ b/src/gradientai/types/agents/api_key_update_response.py @@ -10,3 +10,4 @@ class APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py index a38f021b..2e7cec1e 100644 --- a/src/gradientai/types/agents/api_link_knowledge_base_output.py +++ b/src/gradientai/types/agents/api_link_knowledge_base_output.py @@ -11,6 +11,7 @@ class APILinkKnowledgeBaseOutput(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py index c9ecc60a..0d04dea9 100644 --- a/src/gradientai/types/agents/api_star_metric.py +++ b/src/gradientai/types/agents/api_star_metric.py @@ -12,6 +12,12 @@ class APIStarMetric(BaseModel): name: Optional[str] = None + success_threshold: Optional[float] = None + """ + The success threshold for the star metric. This is a value that the metric must + reach to be considered successful. + """ + success_threshold_pct: Optional[int] = None """ The success threshold for the star metric. This is a percentage value between 0 diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py index 5f7b2fd9..781fb2b1 100644 --- a/src/gradientai/types/agents/api_star_metric_param.py +++ b/src/gradientai/types/agents/api_star_metric_param.py @@ -12,6 +12,12 @@ class APIStarMetricParam(TypedDict, total=False): name: str + success_threshold: float + """ + The success threshold for the star metric. This is a value that the metric must + reach to be considered successful. + """ + success_threshold_pct: int """ The success threshold for the star metric. 
This is a percentage value between 0 diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py index ec5c6b70..aaec2ba5 100644 --- a/src/gradientai/types/agents/chat/completion_create_params.py +++ b/src/gradientai/types/agents/chat/completion_create_params.py @@ -12,7 +12,15 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageToolCall", + "MessageChatCompletionRequestAssistantMessageToolCallFunction", + "MessageChatCompletionRequestToolMessage", "StreamOptions", + "ToolChoice", + "ToolChoiceChatCompletionNamedToolChoice", + "ToolChoiceChatCompletionNamedToolChoiceFunction", + "Tool", + "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -105,6 +113,25 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + tool_choice: ToolChoice + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + """ + + tools: Iterable[Tool] + """A list of tools the model may call. + + Currently, only functions are supported as a tool. + """ + top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -152,6 +179,30 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" + + class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -159,12 +210,27 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" + tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] + """The tool calls generated by the model, such as function calls.""" + + +class MessageChatCompletionRequestToolMessage(TypedDict, total=False): + content: Required[str] + """The contents of the tool message.""" + + role: Required[Literal["tool"]] + """The role of the messages author, in this case `tool`.""" + + tool_call_id: Required[str] + """Tool call that this message is responding to.""" + Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, + MessageChatCompletionRequestToolMessage, ] @@ -181,6 +247,53 @@ class StreamOptions(TypedDict, total=False): """ +class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): + function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] + + +class ToolFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Dict[str, object] + """The parameters the functions accepts, described as a JSON Schema object. + + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + +class Tool(TypedDict, total=False): + function: Required[ToolFunction] + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" + + class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py index f2860c31..4c839ded 100644 --- a/src/gradientai/types/agents/chat/completion_create_response.py +++ b/src/gradientai/types/agents/chat/completion_create_response.py @@ -4,9 +4,17 @@ from typing_extensions import Literal from ...._models import BaseModel +from ...shared.completion_usage import CompletionUsage from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceMessage", + "ChoiceMessageToolCall", + "ChoiceMessageToolCallFunction", +] class ChoiceLogprobs(BaseModel): @@ -17,6 +25,30 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" +class ChoiceMessageToolCallFunction(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChoiceMessageToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: ChoiceMessageToolCallFunction + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" + + class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -27,14 +59,17 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceMessageToolCall]] = None + """The tool calls generated by the model, such as function calls.""" + class Choice(BaseModel): - finish_reason: Literal["stop", "length"] + finish_reason: Literal["stop", "length", "tool_calls"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached. + was reached, `tool_calls` if the model called a tool. 
""" index: int @@ -47,17 +82,6 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -77,5 +101,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py index 6aa6d27a..9a4000c0 100644 --- a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py +++ b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py @@ -15,6 +15,7 @@ class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=Fals class File(TypedDict, total=False): file_name: str + """Local filename""" file_size: str """The size of the file in bytes.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py new file mode 100644 index 00000000..701e7d4e --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationMetricListRegionsParams"] + + +class EvaluationMetricListRegionsParams(TypedDict, total=False): + serves_batch: bool + """Include datacenters that are capable of running batch jobs.""" + + serves_inference: bool + """Include datacenters that serve inference.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py new file mode 100644 index 00000000..7246d484 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationMetricListRegionsResponse", "Region"] + + +class Region(BaseModel): + inference_url: Optional[str] = None + """Url for inference server""" + + region: Optional[str] = None + """Region code""" + + serves_batch: Optional[bool] = None + """This datacenter is capable of running batch jobs""" + + serves_inference: Optional[bool] = None + """This datacenter is capable of serving inference""" + + stream_inference_url: Optional[str] = None + """The url for the inference streaming server""" + + +class EvaluationMetricListRegionsResponse(BaseModel): + regions: Optional[List[Region]] = None + """Region code""" diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py index 7af9b074..c349624b 100644 --- a/src/gradientai/types/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/types/agents/evaluation_metrics/__init__.py @@ -2,6 +2,8 @@ from __future__ import annotations +from .model_list_params import ModelListParams as ModelListParams +from .model_list_response import ModelListResponse as ModelListResponse from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams diff --git a/src/gradientai/types/model_list_params.py b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 87% rename from src/gradientai/types/model_list_params.py rename to src/gradientai/types/agents/evaluation_metrics/model_list_params.py index 4abc1dc1..a2fa066a 100644 --- a/src/gradientai/types/model_list_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py @@ -10,13 +10,13 @@ class ModelListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" public_only: bool - """only include models that are publicly available.""" + """Only include models that are publicly available.""" usecases: List[ Literal[ @@ -29,7 +29,7 @@ class ModelListParams(TypedDict, total=False): "MODEL_USECASE_SERVERLESS", ] ] - """include only models defined for the listed usecases. + """Include only models defined for the listed usecases. - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - MODEL_USECASE_AGENT: The model maybe used in an agent diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py new file mode 100644 index 00000000..2fc17524 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...api_model import APIModel +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks + +__all__ = ["ModelListResponse"] + + +class ModelListResponse(BaseModel): + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + + models: Optional[List[APIModel]] = None + """The models""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py index 73f390be..7a418e81 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py @@ -10,7 +10,10 @@ class WorkspaceCreateParams(TypedDict, total=False): agent_uuids: List[str] + """Ids of the agent(s) to attach to the workspace""" description: str + """Description of the workspace""" name: str + """Name of the workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py index 1fe7b5a2..3e094515 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py @@ -9,3 +9,4 @@ class WorkspaceDeleteResponse(BaseModel): workspace_uuid: Optional[str] = None + """Workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py index 64f9a63c..793623dd 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py @@ -11,6 +11,7 @@ class WorkspaceListResponse(BaseModel): workspaces: Optional[List["APIWorkspace"]] = None + """Workspaces""" from ...api_workspace import APIWorkspace diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py index fd09079e..d5906bd9 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py @@ -11,8 +11,10 @@ class WorkspaceUpdateParams(TypedDict, total=False): description: str + """The new description of the workspace""" name: str + """The new name of the workspace""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] """Workspace UUID.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py index 277274ed..b56d0395 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py @@ -2,25 +2,17 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict -__all__ = ["AgentListParams", "FieldMask"] +__all__ = ["AgentListParams"] class AgentListParams(TypedDict, total=False): - field_mask: FieldMask - only_deployed: bool """Only list agents that are deployed.""" page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" - - -class FieldMask(TypedDict, total=False): - paths:
List[str] - """The set of field mask paths.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py index 1e520736..6f9ea948 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py @@ -15,8 +15,10 @@ class AgentListResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ....api_agent import APIAgent diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py index 8e92503a..74e27dd2 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py @@ -12,5 +12,7 @@ class AgentMoveParams(TypedDict, total=False): agent_uuids: List[str] + """Agent uuids""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] + """Workspace uuid to move agents to""" diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py index 47bdabd6..3029e192 100644 --- a/src/gradientai/types/agents/evaluation_run_create_params.py +++ b/src/gradientai/types/agents/evaluation_run_create_params.py @@ -16,3 +16,4 @@ class EvaluationRunCreateParams(TypedDict, total=False): """The name of the run.""" test_case_uuid: str + """Test-case UUID to run""" diff --git a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradientai/types/agents/evaluation_run_list_results_params.py new file mode 100644 index 00000000..bcf96c14 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_run_list_results_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationRunListResultsParams"] + + +class EvaluationRunListResultsParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradientai/types/agents/evaluation_run_list_results_response.py index f0a9882b..df830a5b 100644 --- a/src/gradientai/types/agents/evaluation_run_list_results_response.py +++ b/src/gradientai/types/agents/evaluation_run_list_results_response.py @@ -3,6 +3,8 @@ from typing import List, Optional from ..._models import BaseModel +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from .api_evaluation_run import APIEvaluationRun from .api_evaluation_prompt import APIEvaluationPrompt @@ -12,5 +14,11 @@ class EvaluationRunListResultsResponse(BaseModel): evaluation_run: Optional[APIEvaluationRun] = None + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + prompts: Optional[List[APIEvaluationPrompt]] = None """The prompt level results.""" diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py index ccfc263e..62b97961 100644 --- a/src/gradientai/types/agents/evaluation_test_case_list_response.py +++ b/src/gradientai/types/agents/evaluation_test_case_list_response.py @@ -10,3 +10,7 @@ class EvaluationTestCaseListResponse(BaseModel): evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + """ + Alternative way of authentication for internal usage only - should not be + exposed to public api + """ diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py index be70fc95..825f961b 100644 --- a/src/gradientai/types/agents/evaluation_test_case_update_params.py +++ b/src/gradientai/types/agents/evaluation_test_case_update_params.py @@ -26,6 +26,7 @@ class EvaluationTestCaseUpdateParams(TypedDict, total=False): star_metric: APIStarMetricParam body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")] + """Test-case UUID to update""" class Metrics(TypedDict, total=False): diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py index 938fb1d5..000de32b 100644 --- a/src/gradientai/types/agents/function_create_params.py +++ b/src/gradientai/types/agents/function_create_params.py @@ -11,15 +11,22 @@ class FunctionCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" description: str + """Function description""" faas_name: str + """The name of the function in the DigitalOcean functions platform""" faas_namespace: str + """The namespace of the function in the DigitalOcean functions platform""" function_name: str + """Function name""" input_schema: object + """Describe the input schema for the function so the agent may call it""" output_schema: object + """Describe the output schema for the function so the agent can handle its response""" diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py index 82ab984b..65a4bb2b 100644 --- a/src/gradientai/types/agents/function_create_response.py +++
b/src/gradientai/types/agents/function_create_response.py @@ -11,6 +11,7 @@ class FunctionCreateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py index 678ef62d..26ad02e6 100644 --- a/src/gradientai/types/agents/function_delete_response.py +++ b/src/gradientai/types/agents/function_delete_response.py @@ -11,6 +11,7 @@ class FunctionDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py index 2fa8e8f0..67c6ea9b 100644 --- a/src/gradientai/types/agents/function_update_params.py +++ b/src/gradientai/types/agents/function_update_params.py @@ -13,17 +13,25 @@ class FunctionUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" description: str + """Function description""" faas_name: str + """The name of the function in the DigitalOcean functions platform""" faas_namespace: str + """The namespace of the function in the DigitalOcean functions platform""" function_name: str + """Function name""" body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] + """Function id""" input_schema: object + """Describe the input schema for the function so the agent may call it""" output_schema: object + """Describe the output schema for the function so the agent can handle its response""" diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py index 82fc63be..eebde3e6 100644 --- a/src/gradientai/types/agents/function_update_response.py +++ b/src/gradientai/types/agents/function_update_response.py @@ -11,6 +11,7 @@ class FunctionUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py index 76bb4236..0dc90aaf 100644 --- a/src/gradientai/types/agents/knowledge_base_detach_response.py +++ b/src/gradientai/types/agents/knowledge_base_detach_response.py @@ -11,6 +11,7 @@ class KnowledgeBaseDetachResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py index b4fcb417..d8dbeff8 100644 --- a/src/gradientai/types/agents/route_add_params.py +++ b/src/gradientai/types/agents/route_add_params.py @@ -13,6 +13,7 @@ class RouteAddParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + """Routed agent id""" if_case: str @@ -20,3 +21,4 @@ class RouteAddParams(TypedDict, total=False): """A unique identifier for the parent agent.""" route_name: str + """Name of route""" diff --git a/src/gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py index cd3bb16a..b9cc2b7d 100644 --- a/src/gradientai/types/agents/route_add_response.py +++ b/src/gradientai/types/agents/route_add_response.py @@ -9,6 +9,7 @@ class
RouteAddResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py index 07105a62..b49c8b7c 100644 --- a/src/gradientai/types/agents/route_delete_response.py +++ b/src/gradientai/types/agents/route_delete_response.py @@ -9,5 +9,7 @@ class RouteDeleteResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None + """Parent agent id""" diff --git a/src/gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py index cb6d6391..453a3b93 100644 --- a/src/gradientai/types/agents/route_update_params.py +++ b/src/gradientai/types/agents/route_update_params.py @@ -13,12 +13,16 @@ class RouteUpdateParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + """Routed agent id""" if_case: str + """Describes the case in which the child agent should be used""" body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] """A unique identifier for the parent agent.""" route_name: str + """Route name""" uuid: str + """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py index 75e1eda5..b79fc9fe 100644 --- a/src/gradientai/types/agents/route_update_response.py +++ b/src/gradientai/types/agents/route_update_response.py @@ -9,6 +9,7 @@ class RouteUpdateResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" @@ -16,3 +17,4 @@ class RouteUpdateResponse(BaseModel): rollback: Optional[bool] = None uuid: Optional[str] = None + """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py index dd9af70b..f0ee2d71 100644 --- a/src/gradientai/types/agents/route_view_response.py +++ b/src/gradientai/types/agents/route_view_response.py @@ -11,6 +11,7 @@ class RouteViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None + """Child agents""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py index a71fd022..e8fa2f6d 100644 --- a/src/gradientai/types/agents/version_list_params.py +++ b/src/gradientai/types/agents/version_list_params.py @@ -9,7 +9,7 @@ class VersionListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py index af25150e..c35a5ba4 100644 --- a/src/gradientai/types/agents/version_list_response.py +++ b/src/gradientai/types/agents/version_list_response.py @@ -22,97 +22,146 @@ class AgentVersionAttachedChildAgent(BaseModel): agent_name: Optional[str] = None + """Name of the child agent""" child_agent_uuid: Optional[str] = None + """Child agent unique identifier""" if_case: Optional[str] = None + """If case""" is_deleted: Optional[bool] = None + """Child agent is deleted""" route_name: Optional[str] = None + """Route
name""" class AgentVersionAttachedFunction(BaseModel): description: Optional[str] = None + """Description of the function""" faas_name: Optional[str] = None + """FaaS name of the function""" faas_namespace: Optional[str] = None + """FaaS namespace of the function""" is_deleted: Optional[bool] = None + """Whether the function is deleted""" name: Optional[str] = None + """Name of the function""" class AgentVersionAttachedGuardrail(BaseModel): is_deleted: Optional[bool] = None + """Whether the guardrail is deleted""" name: Optional[str] = None + """Guardrail Name""" priority: Optional[int] = None + """Guardrail Priority""" uuid: Optional[str] = None + """Guardrail UUID""" class AgentVersionAttachedKnowledgebase(BaseModel): is_deleted: Optional[bool] = None + """Deletet at date / time""" name: Optional[str] = None + """Name of the knowledge base""" uuid: Optional[str] = None + """Unique id of the knowledge base""" class AgentVersion(BaseModel): id: Optional[str] = None + """Unique identifier""" agent_uuid: Optional[str] = None + """Uuid of the agent this version belongs to""" attached_child_agents: Optional[List[AgentVersionAttachedChildAgent]] = None + """List of child agent relationships""" attached_functions: Optional[List[AgentVersionAttachedFunction]] = None + """List of function versions""" attached_guardrails: Optional[List[AgentVersionAttachedGuardrail]] = None + """List of guardrail version""" attached_knowledgebases: Optional[List[AgentVersionAttachedKnowledgebase]] = None + """List of knowledge base agent versions""" can_rollback: Optional[bool] = None + """Whether the version is able to be rolled back to""" created_at: Optional[datetime] = None + """Creation date""" created_by_email: Optional[str] = None + """User who created this version""" currently_applied: Optional[bool] = None + """Whether this is the currently applied configuration""" description: Optional[str] = None + """Description of the agent""" instruction: Optional[str] = None + """Instruction for the agent""" k: Optional[int] = None + """K value for the agent's configuration""" max_tokens: Optional[int] = None + """Max tokens setting for the agent""" - api_model_name: Optional[str] = FieldInfo(alias="model_name", default=None) + model: Optional[str] = FieldInfo(alias="model_name", default=None) + """Name of model associated to the agent version""" name: Optional[str] = None + """Name of the agent""" provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ tags: Optional[List[str]] = None + """Tags associated with the agent""" temperature: Optional[float] = None + """Temperature setting for the agent""" top_p: Optional[float] = None + """Top_p setting for the agent""" trigger_action: Optional[str] = None + """Action triggering the configuration update""" version_hash: Optional[str] = None + """Version hash""" class VersionListResponse(BaseModel): agent_versions: Optional[List[AgentVersion]] = None + """Agents""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git 
a/src/gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py index d7fb01cb..212eb05c 100644 --- a/src/gradientai/types/agents/version_update_params.py +++ b/src/gradientai/types/agents/version_update_params.py @@ -11,5 +11,7 @@ class VersionUpdateParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Agent unique identifier""" version_hash: str + """Unique identifier""" diff --git a/src/gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py index 72058319..464ef12f 100644 --- a/src/gradientai/types/agents/version_update_response.py +++ b/src/gradientai/types/agents/version_update_response.py @@ -28,3 +28,4 @@ class VersionUpdateResponse(BaseModel): """An alternative way to provide auth information. for internal use only.""" version_hash: Optional[str] = None + """Unique identifier""" diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 1378950a..4be22aa5 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -23,6 +23,7 @@ "Deployment", "Function", "Guardrail", + "LoggingConfig", "Template", "TemplateGuardrail", ] @@ -30,6 +31,7 @@ class APIKey(BaseModel): api_key: Optional[str] = None + """Api key""" class Chatbot(BaseModel): @@ -38,6 +40,7 @@ class Chatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None + """Name of chatbot""" primary_color: Optional[str] = None @@ -48,12 +51,15 @@ class Chatbot(BaseModel): class ChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None + """Agent chatbot identifier""" class Deployment(BaseModel): created_at: Optional[datetime] = None + """Creation date / time""" name: Optional[str] = None + """Name""" status: Optional[ Literal[ @@ -70,22 +76,39 @@ class Deployment(BaseModel): ] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your deployed agent here""" uuid: Optional[str] = None + """Unique id""" visibility: Optional[APIDeploymentVisibility] = None + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ class Function(BaseModel): api_key: Optional[str] = None + """Api key""" created_at: Optional[datetime] = None + """Creation date / time""" created_by: Optional[str] = None + """Created by user id from DO""" description: Optional[str] = None + """Function description""" faas_name: Optional[str] = None @@ -94,14 +117,18 @@ class Function(BaseModel): input_schema: Optional[object] = None name: Optional[str] = None + """Name""" output_schema: Optional[object] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Download your agent here""" uuid: Optional[str] = None + """Unique id""" class Guardrail(BaseModel): @@ -139,72 +166,122 @@ class Guardrail(BaseModel): uuid: Optional[str] = None +class LoggingConfig(BaseModel): + galileo_project_id: Optional[str] = None + """Galileo project identifier""" + + galileo_project_name: Optional[str] = None + """Name of the Galileo project""" + + log_stream_id: Optional[str] = None +
"""Identifier for the log stream""" + + log_stream_name: Optional[str] = None + """Name of the log stream""" + + class TemplateGuardrail(BaseModel): priority: Optional[int] = None + """Priority of the guardrail""" uuid: Optional[str] = None + """Uuid of the guardrail""" class Template(BaseModel): created_at: Optional[datetime] = None + """The agent template's creation date""" description: Optional[str] = None + """Deprecated - Use summary instead""" guardrails: Optional[List[TemplateGuardrail]] = None + """List of guardrails associated with the agent template""" instruction: Optional[str] = None + """Instructions for the agent template""" k: Optional[int] = None + """The 'k' value for the agent template""" knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None + """The long description of the agent template""" max_tokens: Optional[int] = None + """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Name of the agent template""" short_description: Optional[str] = None + """The short description of the agent template""" summary: Optional[str] = None + """The summary of the agent template""" tags: Optional[List[str]] = None + """List of tags associated with the agent template""" temperature: Optional[float] = None + """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + """ + - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template + - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template + """ top_p: Optional[float] = None + """The top_p setting for the agent template""" updated_at: Optional[datetime] = None + """The agent template's last updated date""" uuid: Optional[str] = None + """Unique id""" class APIAgent(BaseModel): anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + """Api key infos""" api_keys: Optional[List[APIKey]] = None + """Api keys""" chatbot: Optional[Chatbot] = None + """A Chatbot""" chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None + """Chatbot identifiers""" child_agents: Optional[List["APIAgent"]] = None + """Child agents""" + + conversation_logs_enabled: Optional[bool] = None + """Whether conversation logs are enabled for the agent""" created_at: Optional[datetime] = None + """Creation date / time""" deployment: Optional[Deployment] = None + """Description of deployment""" description: Optional[str] = None + """Description of agent""" functions: Optional[List[Function]] = None guardrails: Optional[List[Guardrail]] = None + """The guardrails the agent is attached to""" if_case: Optional[str] = None @@ -219,48 +296,75 @@ class APIAgent(BaseModel): k: Optional[int] = None knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """Knowledge bases""" + + logging_config: Optional[LoggingConfig] = None max_tokens: Optional[int] = None model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Agent name""" openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" parent_agents: Optional[List["APIAgent"]] = None + """Parent agents""" project_id: Optional[str] = None provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" region: Optional[str] 
= None + """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ route_created_at: Optional[datetime] = None + """Creation of route date / time""" route_created_by: Optional[str] = None route_name: Optional[str] = None + """Route name""" route_uuid: Optional[str] = None tags: Optional[List[str]] = None + """Agent tags to organize related resources""" temperature: Optional[float] = None template: Optional[Template] = None + """Represents an AgentTemplate entity""" top_p: Optional[float] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your agent under this url""" user_id: Optional[str] = None + """Id of user that created the agent""" uuid: Optional[str] = None + """Unique agent id""" + + version_hash: Optional[str] = None + """The latest version of the agent""" workspace: Optional["APIWorkspace"] = None diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py index 8dc71564..7222153c 100644 --- a/src/gradientai/types/api_agent_api_key_info.py +++ b/src/gradientai/types/api_agent_api_key_info.py @@ -10,13 +10,18 @@ class APIAgentAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """Created by""" deleted_at: Optional[datetime] = None + """Deleted date""" name: Optional[str] = None + """Name""" secret_key: Optional[str] = None uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py index 1025321b..f111bfb7 100644 --- a/src/gradientai/types/api_agent_model.py +++ b/src/gradientai/types/api_agent_model.py @@ -13,30 +13,41 @@ class APIAgentModel(BaseModel): agreement: Optional[APIAgreement] = None + """Agreement Description""" created_at: Optional[datetime] = None + """Creation date / time""" inference_name: Optional[str] = None + """Internally used name""" inference_version: Optional[str] = None + """Internally used version""" is_foundational: Optional[bool] = None + """True if it is a foundational model provided by DigitalOcean""" metadata: Optional[object] = None + """Additional meta data""" name: Optional[str] = None + """Name of the model""" parent_uuid: Optional[str] = None + """Unique id of the model this model is based on""" provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( None ) updated_at: Optional[datetime] = None + """Last modified""" upload_complete: Optional[bool] = None + """Model has been fully uploaded""" url: Optional[str] = None + """Download url""" usecases: Optional[ List[ @@ -51,7 +62,10 @@ class APIAgentModel(BaseModel): ] ] ] = None + """Usecases of the model""" uuid: Optional[str] = None + """Unique id""" version: Optional[APIModelVersion] = None + """Version Information about a Model""" diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py index e2e04a8a..6440c5ef 100644 --- a/src/gradientai/types/api_anthropic_api_key_info.py +++ b/src/gradientai/types/api_anthropic_api_key_info.py @@ -10,13 +10,19 @@ class APIAnthropicAPIKeyInfo(BaseModel): created_at:
Optional[datetime] = None + """Key creation date""" created_by: Optional[str] = None + """Created by user id from DO""" deleted_at: Optional[datetime] = None + """Key deleted date""" name: Optional[str] = None + """Name""" updated_at: Optional[datetime] = None + """Key last updated date""" uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 2b0676f0..4e4a6567 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -11,27 +11,37 @@ class APIKnowledgeBase(BaseModel): added_to_agent_at: Optional[datetime] = None + """Time when the knowledge base was added to the agent""" created_at: Optional[datetime] = None + """Creation date / time""" database_id: Optional[str] = None embedding_model_uuid: Optional[str] = None is_public: Optional[bool] = None + """Whether the knowledge base is public or not""" last_indexing_job: Optional[APIIndexingJob] = None + """IndexingJob description""" name: Optional[str] = None + """Name of knowledge base""" project_id: Optional[str] = None region: Optional[str] = None + """Region code""" tags: Optional[List[str]] = None + """Tags to organize related resources""" updated_at: Optional[datetime] = None + """Last modified""" user_id: Optional[str] = None + """Id of user that created the knowledge base""" uuid: Optional[str] = None + """Unique id for knowledge base""" diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index c2bc1edd..7c530ee2 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -12,21 +12,31 @@ class APIModel(BaseModel): agreement: Optional[APIAgreement] = None + """Agreement Description""" created_at: Optional[datetime] = None + """Creation date / time""" is_foundational: Optional[bool] = None + """True if it is a foundational model provided by DigitalOcean""" name: Optional[str] = None + """Name of the model""" parent_uuid: Optional[str] = None + """Unique id of the model this model is based on""" updated_at: Optional[datetime] = None + """Last modified""" upload_complete: Optional[bool] = None + """Model has been fully uploaded""" url: Optional[str] = None + """Download url""" uuid: Optional[str] = None + """Unique id""" version: Optional[APIModelVersion] = None + """Version Information about a Model""" diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py index 2e118632..f19a78c6 100644 --- a/src/gradientai/types/api_model_version.py +++ b/src/gradientai/types/api_model_version.py @@ -9,7 +9,10 @@ class APIModelVersion(BaseModel): major: Optional[int] = None + """Major version number""" minor: Optional[int] = None + """Minor version number""" patch: Optional[int] = None + """Patch version number""" diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index 7467cfc2..bcee992b 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -11,15 +11,22 @@ class APIOpenAIAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Key creation date""" created_by: Optional[str] = None + """Created by user id from DO""" deleted_at: Optional[datetime] = None + """Key deleted date""" models: Optional[List[APIAgentModel]] = None + """Models supported by the OpenAI API key""" name: Optional[str] = None + """Name""" updated_at: Optional[datetime] = None + """Key last updated date""" uuid:
Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py index 83e59379..564fabb6 100644 --- a/src/gradientai/types/api_workspace.py +++ b/src/gradientai/types/api_workspace.py @@ -13,24 +13,34 @@ class APIWorkspace(BaseModel): agents: Optional[List["APIAgent"]] = None + """Agents""" created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """The id of user who created this workspace""" created_by_email: Optional[str] = None + """The email of the user who created this workspace""" deleted_at: Optional[datetime] = None + """Deleted date""" description: Optional[str] = None + """Description of the workspace""" evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + """Evaluations""" name: Optional[str] = None + """Name of the workspace""" updated_at: Optional[datetime] = None + """Update date""" uuid: Optional[str] = None + """Unique id""" from .api_agent import APIAgent diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py index ec5c6b70..aaec2ba5 100644 --- a/src/gradientai/types/chat/completion_create_params.py +++ b/src/gradientai/types/chat/completion_create_params.py @@ -12,7 +12,15 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageToolCall", + "MessageChatCompletionRequestAssistantMessageToolCallFunction", + "MessageChatCompletionRequestToolMessage", "StreamOptions", + "ToolChoice", + "ToolChoiceChatCompletionNamedToolChoice", + "ToolChoiceChatCompletionNamedToolChoiceFunction", + "Tool", + "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -105,6 +113,25 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + tool_choice: ToolChoice + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + """ + + tools: Iterable[Tool] + """A list of tools the model may call. + + Currently, only functions are supported as a tool. + """ + top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -152,6 +179,30 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. 
+ """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -159,12 +210,27 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" + tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] + """The tool calls generated by the model, such as function calls.""" + + +class MessageChatCompletionRequestToolMessage(TypedDict, total=False): + content: Required[str] + """The contents of the tool message.""" + + role: Required[Literal["tool"]] + """The role of the messages author, in this case `tool`.""" + + tool_call_id: Required[str] + """Tool call that this message is responding to.""" + Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, + MessageChatCompletionRequestToolMessage, ] @@ -181,6 +247,53 @@ class StreamOptions(TypedDict, total=False): """ +class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): + function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] + + +class ToolFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Dict[str, object] + """The parameters the functions accepts, described as a JSON Schema object. + + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + +class Tool(TypedDict, total=False): + function: Required[ToolFunction] + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" + + class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 1791373b..73a09cf5 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,9 +4,17 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.completion_usage import CompletionUsage from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceMessage", + "ChoiceMessageToolCall", + "ChoiceMessageToolCallFunction", +] class ChoiceLogprobs(BaseModel): @@ -17,6 +25,30 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" +class ChoiceMessageToolCallFunction(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChoiceMessageToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: ChoiceMessageToolCallFunction + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" + + class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -27,14 +59,17 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceMessageToolCall]] = None + """The tool calls generated by the model, such as function calls.""" + class Choice(BaseModel): - finish_reason: Literal["stop", "length"] + finish_reason: Literal["stop", "length", "tool_calls"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached. + was reached, or `tool_calls` if the model called a tool.
""" index: int @@ -47,17 +82,6 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -77,5 +101,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/gradientai/types/droplet_backup_policy.py new file mode 100644 index 00000000..63112e8f --- /dev/null +++ b/src/gradientai/types/droplet_backup_policy.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["DropletBackupPolicy"] + + +class DropletBackupPolicy(BaseModel): + hour: Optional[Literal[0, 4, 8, 12, 16, 20]] = None + """The hour of the day that the backup window will start.""" + + plan: Optional[Literal["daily", "weekly"]] = None + """The backup plan used for the Droplet. + + The plan can be either `daily` or `weekly`. + """ + + retention_period_days: Optional[int] = None + """The number of days the backup will be retained.""" + + weekday: Optional[Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]] = None + """The day of the week on which the backup will occur.""" + + window_length_hours: Optional[int] = None + """The length of the backup window starting from `hour`.""" diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/gradientai/types/droplet_backup_policy_param.py new file mode 100644 index 00000000..802f057f --- /dev/null +++ b/src/gradientai/types/droplet_backup_policy_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["DropletBackupPolicyParam"] + + +class DropletBackupPolicyParam(TypedDict, total=False): + hour: Literal[0, 4, 8, 12, 16, 20] + """The hour of the day that the backup window will start.""" + + plan: Literal["daily", "weekly"] + """The backup plan used for the Droplet. + + The plan can be either `daily` or `weekly`. + """ + + weekday: Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"] + """The day of the week on which the backup will occur.""" diff --git a/src/gradientai/types/gpu_droplet_create_params.py b/src/gradientai/types/gpu_droplet_create_params.py new file mode 100644 index 00000000..f38661fb --- /dev/null +++ b/src/gradientai/types/gpu_droplet_create_params.py @@ -0,0 +1,213 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Required, TypeAlias, TypedDict + +from .droplet_backup_policy_param import DropletBackupPolicyParam + +__all__ = ["GPUDropletCreateParams", "DropletSingleCreate", "DropletMultiCreate"] + + +class DropletSingleCreate(TypedDict, total=False): + image: Required[Union[str, int]] + """ + The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + """ + + name: Required[str] + """The human-readable string you wish to use when displaying the Droplet name. + + The name, if set to a domain name managed in the DigitalOcean DNS management + system, will configure a PTR record for the Droplet. The name set during + creation will also determine the hostname for the Droplet in its internal + configuration. + """ + + size: Required[str] + """The slug identifier for the size that you wish to select for this Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. + + If omitted and `backups` is `true`, the backup plan will default to daily. + """ + + backups: bool + """ + A boolean indicating whether automated backups should be enabled for the + Droplet. + """ + + ipv6: bool + """A boolean indicating whether to enable IPv6 on the Droplet.""" + + monitoring: bool + """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" + + private_networking: bool + """This parameter has been deprecated. + + Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no + `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC + for the region. + """ + + region: str + """The slug identifier for the region that you wish to deploy the Droplet in. + + If the specific datacenter is not important, a slug prefix (e.g. `nyc`) can + be used to deploy the Droplet in any of that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + """ + + ssh_keys: List[Union[str, int]] + """ + An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to apply to the Droplet after it is + created. + + Tag names can either be existing or new tags. Requires `tag:create` scope. + """ + + user_data: str + """ + A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + """ + + volumes: List[str] + """ + An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scope. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the Droplet will be assigned. + + If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + """ + + with_droplet_agent: bool + """ + A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel.
By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. + """ + + +class DropletMultiCreate(TypedDict, total=False): + image: Required[Union[str, int]] + """ + The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + """ + + names: Required[List[str]] + """ + An array of human-readable strings you wish to use when displaying the + Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS + management system, will configure a PTR record for the Droplet. Each name set + during creation will also determine the hostname for the Droplet in its internal + configuration. + """ + + size: Required[str] + """The slug identifier for the size that you wish to select for this Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. + + If omitted and `backups` is `true`, the backup plan will default to daily. + """ + + backups: bool + """ + A boolean indicating whether automated backups should be enabled for the + Droplet. + """ + + ipv6: bool + """A boolean indicating whether to enable IPv6 on the Droplet.""" + + monitoring: bool + """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" + + private_networking: bool + """This parameter has been deprecated. + + Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no + `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC + for the region. + """ + + region: str + """The slug identifier for the region that you wish to deploy the Droplet in. + + If the specific datacenter is not important, a slug prefix (e.g. `nyc`) can + be used to deploy the Droplet in any of that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + """ + + ssh_keys: List[Union[str, int]] + """ + An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to apply to the Droplet after it is + created. + + Tag names can either be existing or new tags. Requires `tag:create` scope. + """ + + user_data: str + """ + A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + """ + + volumes: List[str] + """ + An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scope. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the Droplet will be assigned. + + If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + """ + + with_droplet_agent: bool + """ + A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel.
By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. + """ + + +GPUDropletCreateParams: TypeAlias = Union[DropletSingleCreate, DropletMultiCreate] diff --git a/src/gradientai/types/gpu_droplet_create_response.py b/src/gradientai/types/gpu_droplet_create_response.py new file mode 100644 index 00000000..72fafb96 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_create_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import TypeAlias + +from .._models import BaseModel +from .shared.droplet import Droplet +from .shared.action_link import ActionLink + +__all__ = [ + "GPUDropletCreateResponse", + "SingleDropletResponse", + "SingleDropletResponseLinks", + "MultipleDropletResponse", + "MultipleDropletResponseLinks", +] + + +class SingleDropletResponseLinks(BaseModel): + actions: Optional[List[ActionLink]] = None + + +class SingleDropletResponse(BaseModel): + droplet: Droplet + + links: SingleDropletResponseLinks + + +class MultipleDropletResponseLinks(BaseModel): + actions: Optional[List[ActionLink]] = None + + +class MultipleDropletResponse(BaseModel): + droplets: List[Droplet] + + links: MultipleDropletResponseLinks + + +GPUDropletCreateResponse: TypeAlias = Union[SingleDropletResponse, MultipleDropletResponse] diff --git a/src/gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/gradientai/types/gpu_droplet_delete_by_tag_params.py new file mode 100644 index 00000000..bc303125 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_delete_by_tag_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["GPUDropletDeleteByTagParams"] + + +class GPUDropletDeleteByTagParams(TypedDict, total=False): + tag_name: Required[str] + """Specifies Droplets to be deleted by tag.""" diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_params.py b/src/gradientai/types/gpu_droplet_list_firewalls_params.py new file mode 100644 index 00000000..1f0111d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_firewalls_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListFirewallsParams"] + + +class GPUDropletListFirewallsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page"""
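`GPUDropletCreateParams` is a union selected by shape: pass `name` for a single Droplet (the `SingleDropletResponse` shape) or `names` for a batch (the `MultipleDropletResponse` shape). A sketch of both calls follows; the `gpu_droplets.create` method path, client name, and image/size slugs are assumptions — only the request fields mirror the TypedDicts above.

```python
# Hypothetical sketch; the client and gpu_droplets.create path are assumed.
from gradientai import GradientAI

client = GradientAI()

# DropletSingleCreate: `name` selects the single-Droplet response shape.
single = client.gpu_droplets.create(
    name="inference-01",
    image="example-gpu-image",  # placeholder image slug
    size="example-gpu-size",    # placeholder size slug
    region="nyc",  # slug prefix: the platform may pick nyc1, nyc2, or nyc3
    backups=True,
    # DropletBackupPolicyParam from above; a daily plan would omit weekday.
    backup_policy={"plan": "weekly", "weekday": "SUN", "hour": 4},
)
print(single.droplet.id)

# DropletMultiCreate: `names` switches to the multi-Droplet response shape.
batch = client.gpu_droplets.create(
    names=["worker-01", "worker-02"],
    image="example-gpu-image",
    size="example-gpu-size",
    region="tor1",
)
print([d.id for d in batch.droplets])
```

diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_response.py b/src/gradientai/types/gpu_droplet_list_firewalls_response.py new file mode 100644 index 00000000..617cdf98 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_firewalls_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.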
+ +from typing import List, Optional + +from .._models import BaseModel +from .shared.page_links import PageLinks +from .gpu_droplets.firewall import Firewall +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListFirewallsResponse"] + + +class GPUDropletListFirewallsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + firewalls: Optional[List[Firewall]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_kernels_params.py b/src/gradientai/types/gpu_droplet_list_kernels_params.py new file mode 100644 index 00000000..7aa73225 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_kernels_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListKernelsParams"] + + +class GPUDropletListKernelsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplet_list_kernels_response.py b/src/gradientai/types/gpu_droplet_list_kernels_response.py new file mode 100644 index 00000000..5fa9a355 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_kernels_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .shared.kernel import Kernel +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListKernelsResponse"] + + +class GPUDropletListKernelsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + kernels: Optional[List[Optional[Kernel]]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_neighbors_response.py b/src/gradientai/types/gpu_droplet_list_neighbors_response.py new file mode 100644 index 00000000..cdfce3e0 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_neighbors_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .shared.droplet import Droplet + +__all__ = ["GPUDropletListNeighborsResponse"] + + +class GPUDropletListNeighborsResponse(BaseModel): + droplets: Optional[List[Droplet]] = None diff --git a/src/gradientai/types/gpu_droplet_list_params.py b/src/gradientai/types/gpu_droplet_list_params.py new file mode 100644 index 00000000..bf6eb793 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_params.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["GPUDropletListParams"] + + +class GPUDropletListParams(TypedDict, total=False): + name: str + """Used to filter list response by Droplet name returning only exact matches. + + It is case-insensitive and can not be combined with `tag_name`. + """ + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + tag_name: str + """Used to filter Droplets by a specific tag. + + Can not be combined with `name` or `type`. Requires `tag:read` scope. 
+ """ + + type: Literal["droplets", "gpus"] + """When `type` is set to `gpus`, only GPU Droplets will be returned. + + By default, only non-GPU Droplets are returned. Can not be combined with + `tag_name`. + """ diff --git a/src/gradientai/types/gpu_droplet_list_response.py b/src/gradientai/types/gpu_droplet_list_response.py new file mode 100644 index 00000000..73e1e503 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .shared.droplet import Droplet +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListResponse"] + + +class GPUDropletListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + droplets: Optional[List[Droplet]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_params.py b/src/gradientai/types/gpu_droplet_list_snapshots_params.py new file mode 100644 index 00000000..66e65a36 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_snapshots_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListSnapshotsParams"] + + +class GPUDropletListSnapshotsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_response.py b/src/gradientai/types/gpu_droplet_list_snapshots_response.py new file mode 100644 index 00000000..4b34d670 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_snapshots_response.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListSnapshotsResponse", "Snapshot"] + + +class Snapshot(BaseModel): + id: int + """The unique identifier for the snapshot or backup.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + type: Literal["snapshot", "backup"] + """Describes the kind of image. + + It may be one of `snapshot` or `backup`. This specifies whether an image is a + user-generated Droplet snapshot or automatically created Droplet backup. 
+ """ + + +class GPUDropletListSnapshotsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshot]] = None diff --git a/src/gradientai/types/gpu_droplet_retrieve_response.py b/src/gradientai/types/gpu_droplet_retrieve_response.py new file mode 100644 index 00000000..d8cc0f20 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .shared.droplet import Droplet + +__all__ = ["GPUDropletRetrieveResponse"] + + +class GPUDropletRetrieveResponse(BaseModel): + droplet: Optional[Droplet] = None diff --git a/src/gradientai/types/gpu_droplets/__init__.py b/src/gradientai/types/gpu_droplets/__init__.py new file mode 100644 index 00000000..c2f1835f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/__init__.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .domains import Domains as Domains +from .firewall import Firewall as Firewall +from .floating_ip import FloatingIP as FloatingIP +from .lb_firewall import LbFirewall as LbFirewall +from .glb_settings import GlbSettings as GlbSettings +from .health_check import HealthCheck as HealthCheck +from .domains_param import DomainsParam as DomainsParam +from .load_balancer import LoadBalancer as LoadBalancer +from .autoscale_pool import AutoscalePool as AutoscalePool +from .firewall_param import FirewallParam as FirewallParam +from .forwarding_rule import ForwardingRule as ForwardingRule +from .sticky_sessions import StickySessions as StickySessions +from .size_list_params import SizeListParams as SizeListParams +from .image_list_params import ImageListParams as ImageListParams +from .lb_firewall_param import LbFirewallParam as LbFirewallParam +from .action_list_params import ActionListParams as ActionListParams +from .backup_list_params import BackupListParams as BackupListParams +from .glb_settings_param import GlbSettingsParam as GlbSettingsParam +from .health_check_param import HealthCheckParam as HealthCheckParam +from .size_list_response import SizeListResponse as SizeListResponse +from .volume_list_params import VolumeListParams as VolumeListParams +from .associated_resource import AssociatedResource as AssociatedResource +from .current_utilization import CurrentUtilization as CurrentUtilization +from .image_create_params import ImageCreateParams as ImageCreateParams +from .image_list_response import ImageListResponse as ImageListResponse +from .image_update_params import ImageUpdateParams as ImageUpdateParams +from .action_list_response import ActionListResponse as ActionListResponse +from .backup_list_response import BackupListResponse as BackupListResponse +from .firewall_list_params import FirewallListParams as FirewallListParams +from .snapshot_list_params import SnapshotListParams as SnapshotListParams +from .volume_create_params import VolumeCreateParams as VolumeCreateParams +from .volume_list_response import VolumeListResponse as VolumeListResponse +from .autoscale_list_params import AutoscaleListParams as AutoscaleListParams +from .forwarding_rule_param import ForwardingRuleParam as ForwardingRuleParam +from .image_create_response import ImageCreateResponse as ImageCreateResponse +from .image_update_response import ImageUpdateResponse as 
ImageUpdateResponse +from .sticky_sessions_param import StickySessionsParam as StickySessionsParam +from .action_initiate_params import ActionInitiateParams as ActionInitiateParams +from .firewall_create_params import FirewallCreateParams as FirewallCreateParams +from .firewall_list_response import FirewallListResponse as FirewallListResponse +from .firewall_update_params import FirewallUpdateParams as FirewallUpdateParams +from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse +from .volume_create_response import VolumeCreateResponse as VolumeCreateResponse +from .autoscale_create_params import AutoscaleCreateParams as AutoscaleCreateParams +from .autoscale_list_response import AutoscaleListResponse as AutoscaleListResponse +from .autoscale_update_params import AutoscaleUpdateParams as AutoscaleUpdateParams +from .floating_ip_list_params import FloatingIPListParams as FloatingIPListParams +from .image_retrieve_response import ImageRetrieveResponse as ImageRetrieveResponse +from .action_initiate_response import ActionInitiateResponse as ActionInitiateResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse +from .firewall_create_response import FirewallCreateResponse as FirewallCreateResponse +from .firewall_update_response import FirewallUpdateResponse as FirewallUpdateResponse +from .volume_retrieve_response import VolumeRetrieveResponse as VolumeRetrieveResponse +from .autoscale_create_response import AutoscaleCreateResponse as AutoscaleCreateResponse +from .autoscale_update_response import AutoscaleUpdateResponse as AutoscaleUpdateResponse +from .floating_ip_create_params import FloatingIPCreateParams as FloatingIPCreateParams +from .floating_ip_list_response import FloatingIPListResponse as FloatingIPListResponse +from .load_balancer_list_params import LoadBalancerListParams as LoadBalancerListParams +from .firewall_retrieve_response import FirewallRetrieveResponse as FirewallRetrieveResponse +from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse +from .action_bulk_initiate_params import ActionBulkInitiateParams as ActionBulkInitiateParams +from .autoscale_retrieve_response import AutoscaleRetrieveResponse as AutoscaleRetrieveResponse +from .backup_list_policies_params import BackupListPoliciesParams as BackupListPoliciesParams +from .floating_ip_create_response import FloatingIPCreateResponse as FloatingIPCreateResponse +from .load_balancer_create_params import LoadBalancerCreateParams as LoadBalancerCreateParams +from .load_balancer_list_response import LoadBalancerListResponse as LoadBalancerListResponse +from .load_balancer_update_params import LoadBalancerUpdateParams as LoadBalancerUpdateParams +from .autoscale_pool_static_config import AutoscalePoolStaticConfig as AutoscalePoolStaticConfig +from .volume_delete_by_name_params import VolumeDeleteByNameParams as VolumeDeleteByNameParams +from .action_bulk_initiate_response import ActionBulkInitiateResponse as ActionBulkInitiateResponse +from .autoscale_list_history_params import AutoscaleListHistoryParams as AutoscaleListHistoryParams +from .autoscale_list_members_params import AutoscaleListMembersParams as AutoscaleListMembersParams +from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig as AutoscalePoolDynamicConfig +from .backup_list_policies_response import BackupListPoliciesResponse as BackupListPoliciesResponse +from .destroyed_associated_resource import DestroyedAssociatedResource as 
DestroyedAssociatedResource +from .floating_ip_retrieve_response import FloatingIPRetrieveResponse as FloatingIPRetrieveResponse +from .load_balancer_create_response import LoadBalancerCreateResponse as LoadBalancerCreateResponse +from .load_balancer_update_response import LoadBalancerUpdateResponse as LoadBalancerUpdateResponse +from .autoscale_list_history_response import AutoscaleListHistoryResponse as AutoscaleListHistoryResponse +from .autoscale_list_members_response import AutoscaleListMembersResponse as AutoscaleListMembersResponse +from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate as AutoscalePoolDropletTemplate +from .backup_retrieve_policy_response import BackupRetrievePolicyResponse as BackupRetrievePolicyResponse +from .load_balancer_retrieve_response import LoadBalancerRetrieveResponse as LoadBalancerRetrieveResponse +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam as AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam as AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import ( + AutoscalePoolDropletTemplateParam as AutoscalePoolDropletTemplateParam, +) +from .backup_list_supported_policies_response import ( + BackupListSupportedPoliciesResponse as BackupListSupportedPoliciesResponse, +) +from .destroy_with_associated_resource_list_response import ( + DestroyWithAssociatedResourceListResponse as DestroyWithAssociatedResourceListResponse, +) +from .destroy_with_associated_resource_check_status_response import ( + DestroyWithAssociatedResourceCheckStatusResponse as DestroyWithAssociatedResourceCheckStatusResponse, +) +from .destroy_with_associated_resource_delete_selective_params import ( + DestroyWithAssociatedResourceDeleteSelectiveParams as DestroyWithAssociatedResourceDeleteSelectiveParams, +) diff --git a/src/gradientai/types/gpu_droplets/account/__init__.py b/src/gradientai/types/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..4cd64974 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/__init__.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse diff --git a/src/gradientai/types/gpu_droplets/account/key_create_params.py b/src/gradientai/types/gpu_droplets/account/key_create_params.py new file mode 100644 index 00000000..4e7c1cef --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_create_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + name: Required[str] + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: Required[str] + """The entire public key string that was uploaded. 
+ + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ diff --git a/src/gradientai/types/gpu_droplets/account/key_create_response.py b/src/gradientai/types/gpu_droplets/account/key_create_response.py new file mode 100644 index 00000000..9fe566ed --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_create_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyCreateResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ + + +class KeyCreateResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_list_params.py b/src/gradientai/types/gpu_droplets/account/key_list_params.py new file mode 100644 index 00000000..44a455f3 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/account/key_list_response.py b/src/gradientai/types/gpu_droplets/account/key_list_response.py new file mode 100644 index 00000000..be4c721c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_list_response.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["KeyListResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. 
+ """ + + +class KeyListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + ssh_keys: Optional[List[SSHKey]] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py new file mode 100644 index 00000000..7cd3215e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyRetrieveResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ + + +class KeyRetrieveResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_update_params.py b/src/gradientai/types/gpu_droplets/account/key_update_params.py new file mode 100644 index 00000000..e73d8b7b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ diff --git a/src/gradientai/types/gpu_droplets/account/key_update_response.py b/src/gradientai/types/gpu_droplets/account/key_update_response.py new file mode 100644 index 00000000..2821e44a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_update_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyUpdateResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. 
+ """ + + +class KeyUpdateResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py new file mode 100644 index 00000000..a6402096 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py @@ -0,0 +1,72 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionBulkInitiateParams", "DropletAction", "DropletActionSnapshot"] + + +class DropletAction(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + tag_name: str + """Used to filter Droplets by a specific tag. + + Can not be combined with `name` or `type`. Requires `tag:read` scope. + """ + + +class DropletActionSnapshot(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + tag_name: str + """Used to filter Droplets by a specific tag. + + Can not be combined with `name` or `type`. Requires `tag:read` scope. + """ + + name: str + """The name to give the new snapshot of the Droplet.""" + + +ActionBulkInitiateParams: TypeAlias = Union[DropletAction, DropletActionSnapshot] diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py new file mode 100644 index 00000000..905860d7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionBulkInitiateResponse"] + + +class ActionBulkInitiateResponse(BaseModel): + actions: Optional[List[Action]] = None diff --git a/src/gradientai/types/gpu_droplets/action_initiate_params.py b/src/gradientai/types/gpu_droplets/action_initiate_params.py new file mode 100644 index 00000000..f0ef6b1e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_initiate_params.py @@ -0,0 +1,278 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..droplet_backup_policy_param import DropletBackupPolicyParam + +__all__ = [ + "ActionInitiateParams", + "DropletAction", + "DropletActionEnableBackups", + "DropletActionChangeBackupPolicy", + "DropletActionRestore", + "DropletActionResize", + "DropletActionRebuild", + "DropletActionRename", + "DropletActionChangeKernel", + "DropletActionSnapshot", +] + + +class DropletAction(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + +class DropletActionEnableBackups(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. + + If omitted, the backup plan will default to daily. + """ + + +class DropletActionChangeBackupPolicy(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet.""" + + +class DropletActionRestore(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + image: int + """The ID of a backup of the current Droplet instance to restore from.""" + + +class DropletActionResize(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + disk: bool + """When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. 
+ """ + + size: str + """The slug identifier for the size to which you wish to resize the Droplet.""" + + +class DropletActionRebuild(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + image: Union[str, int] + """ + The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base. + """ + + +class DropletActionRename(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + name: str + """The new name for the Droplet.""" + + +class DropletActionChangeKernel(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + kernel: int + """A unique number used to identify and reference a specific kernel.""" + + +class DropletActionSnapshot(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + name: str + """The name to give the new snapshot of the Droplet.""" + + +ActionInitiateParams: TypeAlias = Union[ + DropletAction, + DropletActionEnableBackups, + DropletActionChangeBackupPolicy, + DropletActionRestore, + DropletActionResize, + DropletActionRebuild, + DropletActionRename, + DropletActionChangeKernel, + DropletActionSnapshot, +] diff --git a/src/gradientai/types/gpu_droplets/action_initiate_response.py b/src/gradientai/types/gpu_droplets/action_initiate_response.py new file mode 100644 index 00000000..087781d1 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_initiate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionInitiateResponse"] + + +class ActionInitiateResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/action_list_params.py b/src/gradientai/types/gpu_droplets/action_list_params.py new file mode 100644 index 00000000..dd873288 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ActionListParams"] + + +class ActionListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/action_list_response.py b/src/gradientai/types/gpu_droplets/action_list_response.py new file mode 100644 index 00000000..1a20f780 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.action import Action +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/action_retrieve_response.py new file mode 100644 index 00000000..3856228d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionRetrieveResponse"] + + +class ActionRetrieveResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/associated_resource.py b/src/gradientai/types/gpu_droplets/associated_resource.py new file mode 100644 index 00000000..f72c3d32 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/associated_resource.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AssociatedResource"] + + +class AssociatedResource(BaseModel): + id: Optional[str] = None + """The unique identifier for the resource associated with the Droplet.""" + + cost: Optional[str] = None + """ + The cost of the resource in USD per month if the resource is retained after the + Droplet is destroyed. + """ + + name: Optional[str] = None + """The name of the resource associated with the Droplet.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_params.py b/src/gradientai/types/gpu_droplets/autoscale_create_params.py new file mode 100644 index 00000000..0f3c05a6 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_create_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
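# Illustrative sketch, not part of the generated diff: the page/per_page
# TypedDicts repeated throughout these files (ActionListParams here) are
# ordinary query-parameter dicts.
from gradientai.types.gpu_droplets.action_list_params import ActionListParams

second_page: ActionListParams = {
    "page": 2,  # which 'page' of paginated results to return
    "per_page": 50,  # number of items returned per page
}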
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleCreateParams", "Config"] + + +class AutoscaleCreateParams(TypedDict, total=False): + config: Required[Config] + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + """ + + droplet_template: Required[AutoscalePoolDropletTemplateParam] + + name: Required[str] + """The human-readable name of the autoscale pool. This field cannot be updated""" + + +Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_response.py b/src/gradientai/types/gpu_droplets/autoscale_create_response.py new file mode 100644 index 00000000..819297e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleCreateResponse"] + + +class AutoscaleCreateResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py new file mode 100644 index 00000000..f837a11e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListHistoryParams"] + + +class AutoscaleListHistoryParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py new file mode 100644 index 00000000..843f44d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListHistoryResponse", "History"] + + +class History(BaseModel): + created_at: datetime + """ + The creation time of the history event in ISO8601 combined date and time format. 
+ """ + + current_instance_count: int + """The current number of Droplets in the autoscale pool.""" + + desired_instance_count: int + """The target number of Droplets for the autoscale pool after the scaling event.""" + + history_event_id: str + """The unique identifier of the history event.""" + + reason: Literal["CONFIGURATION_CHANGE", "SCALE_UP", "SCALE_DOWN"] + """The reason for the scaling event.""" + + status: Literal["in_progress", "success", "error"] + """The status of the scaling event.""" + + updated_at: datetime + """ + The last updated time of the history event in ISO8601 combined date and time + format. + """ + + +class AutoscaleListHistoryResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + history: Optional[List[History]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py new file mode 100644 index 00000000..5a7f738d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListMembersParams"] + + +class AutoscaleListMembersParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py new file mode 100644 index 00000000..337ac4e3 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListMembersResponse", "Droplet", "DropletCurrentUtilization"] + + +class DropletCurrentUtilization(BaseModel): + cpu: Optional[float] = None + """The CPU utilization average of the individual Droplet.""" + + memory: Optional[float] = None + """The memory utilization average of the individual Droplet.""" + + +class Droplet(BaseModel): + created_at: datetime + """The creation time of the Droplet in ISO8601 combined date and time format.""" + + current_utilization: DropletCurrentUtilization + + droplet_id: int + """The unique identifier of the Droplet.""" + + health_status: str + """The health status of the Droplet.""" + + status: Literal["provisioning", "active", "deleting", "off"] + """The power status of the Droplet.""" + + updated_at: datetime + """The last updated time of the Droplet in ISO8601 combined date and time format.""" + + +class AutoscaleListMembersResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + droplets: Optional[List[Droplet]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_params.py new file mode 100644 index 00000000..3a35e616 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListParams"] + + +class AutoscaleListParams(TypedDict, total=False): + name: str + """The name of the autoscale pool""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_response.py new file mode 100644 index 00000000..807cb17f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListResponse"] + + +class AutoscaleListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + autoscale_pools: Optional[List[AutoscalePool]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool.py b/src/gradientai/types/gpu_droplets/autoscale_pool.py new file mode 100644 index 00000000..2964319e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from datetime import datetime +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .current_utilization import CurrentUtilization +from .autoscale_pool_static_config import AutoscalePoolStaticConfig +from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig +from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate + +__all__ = ["AutoscalePool", "Config"] + +Config: TypeAlias = Union[AutoscalePoolStaticConfig, AutoscalePoolDynamicConfig] + + +class AutoscalePool(BaseModel): + id: str + """A unique identifier for each autoscale pool instance. + + This is automatically generated upon autoscale pool creation. + """ + + active_resources_count: int + """The number of active Droplets in the autoscale pool.""" + + config: Config + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + """ + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the autoscale pool was created. + """ + + droplet_template: AutoscalePoolDropletTemplate + + name: str + """The human-readable name set for the autoscale pool.""" + + status: Literal["active", "deleting", "error"] + """The current status of the autoscale pool.""" + + updated_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the autoscale pool was last updated. + """ + + current_utilization: Optional[CurrentUtilization] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py new file mode 100644 index 00000000..2ab2036b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolDropletTemplate"] + + +class AutoscalePoolDropletTemplate(BaseModel): + image: str + """The Droplet image to be used for all Droplets in the autoscale pool. + + You may specify the slug or the image ID. + """ + + region: Literal[ + "nyc1", "nyc2", "nyc3", "ams2", "ams3", "sfo1", "sfo2", "sfo3", "sgp1", "lon1", "fra1", "tor1", "blr1", "syd1" + ] + """The datacenter in which all of the Droplets will be created.""" + + size: str + """The Droplet size to be used for all Droplets in the autoscale pool.""" + + ssh_keys: List[str] + """The SSH keys to be installed on the Droplets in the autoscale pool. + + You can either specify the key ID or the fingerprint. Requires `ssh_key:read` + scope. + """ + + ipv6: Optional[bool] = None + """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" + + name: Optional[str] = None + """The name(s) to be applied to all Droplets in the autoscale pool.""" + + project_id: Optional[str] = None + """ + The project that the Droplets in the autoscale pool will belong to. Requires + `project:read` scope. + """ + + tags: Optional[List[str]] = None + """ + The tags to apply to each of the Droplets in the autoscale pool. Requires + `tag:read` scope. + """ + + user_data: Optional[str] = None + """ + A string containing user data that cloud-init consumes to configure a Droplet on + first boot. User data is often a cloud-config file or Bash script. 
It must be + plain text and may not exceed 64 KiB in size. + """ + + vpc_uuid: Optional[str] = None + """The VPC where the Droplets in the autoscale pool will be created. + + The VPC must be in the region where you want to create the Droplets. Requires + `vpc:read` scope. + """ + + with_droplet_agent: Optional[bool] = None + """Installs the Droplet agent. + + This must be set to true to monitor Droplets for resource utilization scaling. + """ diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py new file mode 100644 index 00000000..c491ed55 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py @@ -0,0 +1,84 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoscalePoolDropletTemplateParam"] + + +class AutoscalePoolDropletTemplateParam(TypedDict, total=False): + image: Required[str] + """The Droplet image to be used for all Droplets in the autoscale pool. + + You may specify the slug or the image ID. + """ + + region: Required[ + Literal[ + "nyc1", + "nyc2", + "nyc3", + "ams2", + "ams3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "lon1", + "fra1", + "tor1", + "blr1", + "syd1", + ] + ] + """The datacenter in which all of the Droplets will be created.""" + + size: Required[str] + """The Droplet size to be used for all Droplets in the autoscale pool.""" + + ssh_keys: Required[List[str]] + """The SSH keys to be installed on the Droplets in the autoscale pool. + + You can either specify the key ID or the fingerprint. Requires `ssh_key:read` + scope. + """ + + ipv6: bool + """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" + + name: str + """The name(s) to be applied to all Droplets in the autoscale pool.""" + + project_id: str + """ + The project that the Droplets in the autoscale pool will belong to. Requires + `project:read` scope. + """ + + tags: List[str] + """ + The tags to apply to each of the Droplets in the autoscale pool. Requires + `tag:read` scope. + """ + + user_data: str + """ + A string containing user data that cloud-init consumes to configure a Droplet on + first boot. User data is often a cloud-config file or Bash script. It must be + plain text and may not exceed 64 KiB in size. + """ + + vpc_uuid: str + """The VPC where the Droplets in the autoscale pool will be created. + + The VPC must be in the region where you want to create the Droplets. Requires + `vpc:read` scope. + """ + + with_droplet_agent: bool + """Installs the Droplet agent. + + This must be set to true to monitor Droplets for resource utilization scaling. + """ diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py new file mode 100644 index 00000000..10f9781b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
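# Illustrative sketch, not part of the generated diff: a minimal Droplet
# template for an autoscale pool using the TypedDict above. The image, size,
# and SSH key values are placeholders; `region` must be one of the Literal
# slugs in the definition.
from gradientai.types.gpu_droplets.autoscale_pool_droplet_template_param import (
    AutoscalePoolDropletTemplateParam,
)

template: AutoscalePoolDropletTemplateParam = {
    "image": "ubuntu-24-04-x64",  # slug or image ID (placeholder)
    "region": "nyc3",
    "size": "s-1vcpu-1gb",  # size slug (placeholder)
    "ssh_keys": ["my-key-fingerprint"],  # key IDs or fingerprints (placeholder)
    "with_droplet_agent": True,  # required for utilization-based scaling
}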
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolDynamicConfig"] + + +class AutoscalePoolDynamicConfig(BaseModel): + max_instances: int + """The maximum number of Droplets in an autoscale pool.""" + + min_instances: int + """The minimum number of Droplets in an autoscale pool.""" + + cooldown_minutes: Optional[int] = None + """The number of minutes to wait between scaling events in an autoscale pool. + + Defaults to 10 minutes. + """ + + target_cpu_utilization: Optional[float] = None + """Target CPU utilization as a decimal.""" + + target_memory_utilization: Optional[float] = None + """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py new file mode 100644 index 00000000..af06e73a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AutoscalePoolDynamicConfigParam"] + + +class AutoscalePoolDynamicConfigParam(TypedDict, total=False): + max_instances: Required[int] + """The maximum number of Droplets in an autoscale pool.""" + + min_instances: Required[int] + """The minimum number of Droplets in an autoscale pool.""" + + cooldown_minutes: int + """The number of minutes to wait between scaling events in an autoscale pool. + + Defaults to 10 minutes. + """ + + target_cpu_utilization: float + """Target CPU utilization as a decimal.""" + + target_memory_utilization: float + """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py new file mode 100644 index 00000000..cc891007 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolStaticConfig"] + + +class AutoscalePoolStaticConfig(BaseModel): + target_number_instances: int + """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py new file mode 100644 index 00000000..a7510d22 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AutoscalePoolStaticConfigParam"] + + +class AutoscalePoolStaticConfigParam(TypedDict, total=False): + target_number_instances: Required[int] + """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py new file mode 100644 index 00000000..f383ed03 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
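# Illustrative sketch, not part of the generated diff: an autoscale pool's
# config is a union of the static and dynamic TypedDicts above. Note the
# utilization targets are decimals, not percentages; all values are
# placeholders.
from gradientai.types.gpu_droplets.autoscale_pool_dynamic_config_param import (
    AutoscalePoolDynamicConfigParam,
)
from gradientai.types.gpu_droplets.autoscale_pool_static_config_param import (
    AutoscalePoolStaticConfigParam,
)

dynamic: AutoscalePoolDynamicConfigParam = {
    "min_instances": 2,
    "max_instances": 10,
    "target_cpu_utilization": 0.6,  # keep average CPU near 60%
    "cooldown_minutes": 10,  # matches the documented default
}

static: AutoscalePoolStaticConfigParam = {
    "target_number_instances": 3,  # fixed pool size
}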
+ +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleRetrieveResponse"] + + +class AutoscaleRetrieveResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_params.py b/src/gradientai/types/gpu_droplets/autoscale_update_params.py new file mode 100644 index 00000000..1b96af1e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_update_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleUpdateParams", "Config"] + + +class AutoscaleUpdateParams(TypedDict, total=False): + config: Required[Config] + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + """ + + droplet_template: Required[AutoscalePoolDropletTemplateParam] + + name: Required[str] + """The human-readable name of the autoscale pool. This field cannot be updated""" + + +Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_response.py b/src/gradientai/types/gpu_droplets/autoscale_update_response.py new file mode 100644 index 00000000..09dde2a4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleUpdateResponse"] + + +class AutoscaleUpdateResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/backup_list_params.py b/src/gradientai/types/gpu_droplets/backup_list_params.py new file mode 100644 index 00000000..66fe92aa --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BackupListParams"] + + +class BackupListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/gradientai/types/gpu_droplets/backup_list_policies_params.py new file mode 100644 index 00000000..0cdb0ddb --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_policies_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BackupListPoliciesParams"] + + +class BackupListPoliciesParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_policies_response.py new file mode 100644 index 00000000..73aa9458 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_policies_response.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..droplet_backup_policy import DropletBackupPolicy +from ..shared.meta_properties import MetaProperties +from ..shared.droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["BackupListPoliciesResponse", "Policies"] + + +class Policies(BaseModel): + backup_enabled: Optional[bool] = None + """A boolean value indicating whether backups are enabled for the Droplet.""" + + backup_policy: Optional[DropletBackupPolicy] = None + """An object specifying the backup policy for the Droplet.""" + + droplet_id: Optional[int] = None + """The unique identifier for the Droplet.""" + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + An object containing keys with the start and end times of the window during + which the backup will occur. + """ + + +class BackupListPoliciesResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + policies: Optional[Dict[str, Policies]] = None + """ + A map where the keys are the Droplet IDs and the values are objects containing + the backup policy information for each Droplet. + """ diff --git a/src/gradientai/types/gpu_droplets/backup_list_response.py b/src/gradientai/types/gpu_droplets/backup_list_response.py new file mode 100644 index 00000000..c96d573a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_response.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["BackupListResponse", "Backup"] + + +class Backup(BaseModel): + id: int + """The unique identifier for the snapshot or backup.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + type: Literal["snapshot", "backup"] + """Describes the kind of image. + + It may be one of `snapshot` or `backup`. This specifies whether an image is a + user-generated Droplet snapshot or automatically created Droplet backup. 
+ """ + + +class BackupListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + backups: Optional[List[Backup]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py new file mode 100644 index 00000000..219cfc34 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["BackupListSupportedPoliciesResponse", "SupportedPolicy"] + + +class SupportedPolicy(BaseModel): + name: Optional[str] = None + """The name of the Droplet backup plan.""" + + possible_days: Optional[List[str]] = None + """The day of the week the backup will occur.""" + + possible_window_starts: Optional[List[int]] = None + """An array of integers representing the hours of the day that a backup can start.""" + + retention_period_days: Optional[int] = None + """The number of days that a backup will be kept.""" + + window_length_hours: Optional[int] = None + """The number of hours that a backup window is open.""" + + +class BackupListSupportedPoliciesResponse(BaseModel): + supported_policies: Optional[List[SupportedPolicy]] = None diff --git a/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py new file mode 100644 index 00000000..38288dea --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..droplet_backup_policy import DropletBackupPolicy +from ..shared.droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["BackupRetrievePolicyResponse", "Policy"] + + +class Policy(BaseModel): + backup_enabled: Optional[bool] = None + """A boolean value indicating whether backups are enabled for the Droplet.""" + + backup_policy: Optional[DropletBackupPolicy] = None + """An object specifying the backup policy for the Droplet.""" + + droplet_id: Optional[int] = None + """The unique identifier for the Droplet.""" + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + An object containing keys with the start and end times of the window during + which the backup will occur. + """ + + +class BackupRetrievePolicyResponse(BaseModel): + policy: Optional[Policy] = None diff --git a/src/gradientai/types/gpu_droplets/current_utilization.py b/src/gradientai/types/gpu_droplets/current_utilization.py new file mode 100644 index 00000000..f2cb0b6c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/current_utilization.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["CurrentUtilization"] + + +class CurrentUtilization(BaseModel): + cpu: Optional[float] = None + """The average CPU utilization of the autoscale pool.""" + + memory: Optional[float] = None + """The average memory utilization of the autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py new file mode 100644 index 00000000..f2f2ff67 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel +from .destroyed_associated_resource import DestroyedAssociatedResource + +__all__ = ["DestroyWithAssociatedResourceCheckStatusResponse", "Resources"] + + +class Resources(BaseModel): + floating_ips: Optional[List[DestroyedAssociatedResource]] = None + + reserved_ips: Optional[List[DestroyedAssociatedResource]] = None + + snapshots: Optional[List[DestroyedAssociatedResource]] = None + + volume_snapshots: Optional[List[DestroyedAssociatedResource]] = None + + volumes: Optional[List[DestroyedAssociatedResource]] = None + + +class DestroyWithAssociatedResourceCheckStatusResponse(BaseModel): + completed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format indicating when the + requested action was completed. + """ + + droplet: Optional[DestroyedAssociatedResource] = None + """An object containing information about a resource scheduled for deletion.""" + + failures: Optional[int] = None + """A count of the associated resources that failed to be destroyed, if any.""" + + resources: Optional[Resources] = None + """ + An object containing additional information about resources related to a Droplet + requested to be destroyed. + """ diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py new file mode 100644 index 00000000..f4037b6b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["DestroyWithAssociatedResourceDeleteSelectiveParams"] + + +class DestroyWithAssociatedResourceDeleteSelectiveParams(TypedDict, total=False): + floating_ips: List[str] + """ + An array of unique identifiers for the floating IPs to be scheduled for + deletion. + """ + + reserved_ips: List[str] + """ + An array of unique identifiers for the reserved IPs to be scheduled for + deletion. + """ + + snapshots: List[str] + """An array of unique identifiers for the snapshots to be scheduled for deletion.""" + + volume_snapshots: List[str] + """ + An array of unique identifiers for the volume snapshots to be scheduled for + deletion.
+ """ + + volumes: List[str] + """An array of unique identifiers for the volumes to be scheduled for deletion.""" diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py new file mode 100644 index 00000000..ef4c6c99 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .associated_resource import AssociatedResource + +__all__ = ["DestroyWithAssociatedResourceListResponse"] + + +class DestroyWithAssociatedResourceListResponse(BaseModel): + floating_ips: Optional[List[AssociatedResource]] = None + """ + Floating IPs that are associated with this Droplet. Requires `reserved_ip:read` + scope. + """ + + reserved_ips: Optional[List[AssociatedResource]] = None + """ + Reserved IPs that are associated with this Droplet. Requires `reserved_ip:read` + scope. + """ + + snapshots: Optional[List[AssociatedResource]] = None + """Snapshots that are associated with this Droplet. Requires `image:read` scope.""" + + volume_snapshots: Optional[List[AssociatedResource]] = None + """ + Volume Snapshots that are associated with this Droplet. Requires + `block_storage_snapshot:read` scope. + """ + + volumes: Optional[List[AssociatedResource]] = None + """ + Volumes that are associated with this Droplet. Requires `block_storage:read` + scope. + """ diff --git a/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py new file mode 100644 index 00000000..358c14e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["DestroyedAssociatedResource"] + + +class DestroyedAssociatedResource(BaseModel): + id: Optional[str] = None + """The unique identifier for the resource scheduled for deletion.""" + + destroyed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format indicating when the + resource was destroyed if the request was successful. + """ + + error_message: Optional[str] = None + """ + A string indicating that the resource was not successfully destroyed and + providing additional information. + """ + + name: Optional[str] = None + """The name of the resource scheduled for deletion.""" diff --git a/src/gradientai/types/gpu_droplets/domains.py b/src/gradientai/types/gpu_droplets/domains.py new file mode 100644 index 00000000..6a9400f9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/domains.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["Domains"] + + +class Domains(BaseModel): + certificate_id: Optional[str] = None + """The ID of the TLS certificate used for SSL termination.""" + + is_managed: Optional[bool] = None + """A boolean value indicating if the domain is already managed by DigitalOcean. + + If true, all A and AAAA records required to enable Global load balancers will be + automatically added. 
+ """ + + name: Optional[str] = None + """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/domains_param.py b/src/gradientai/types/gpu_droplets/domains_param.py new file mode 100644 index 00000000..d2d21faf --- /dev/null +++ b/src/gradientai/types/gpu_droplets/domains_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["DomainsParam"] + + +class DomainsParam(TypedDict, total=False): + certificate_id: str + """The ID of the TLS certificate used for SSL termination.""" + + is_managed: bool + """A boolean value indicating if the domain is already managed by DigitalOcean. + + If true, all A and AAAA records required to enable Global load balancers will be + automatically added. + """ + + name: str + """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/firewall.py b/src/gradientai/types/gpu_droplets/firewall.py new file mode 100644 index 00000000..0eb352a1 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.firewall_rule_target import FirewallRuleTarget + +__all__ = ["Firewall", "InboundRule", "OutboundRule", "PendingChange"] + + +class InboundRule(BaseModel): + ports: str + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Literal["tcp", "udp", "icmp"] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + sources: FirewallRuleTarget + """An object specifying locations from which inbound traffic will be accepted.""" + + +class OutboundRule(BaseModel): + destinations: FirewallRuleTarget + """An object specifying locations to which outbound traffic that will be allowed.""" + + ports: str + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Literal["tcp", "udp", "icmp"] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + +class PendingChange(BaseModel): + droplet_id: Optional[int] = None + + removing: Optional[bool] = None + + status: Optional[str] = None + + +class Firewall(BaseModel): + id: Optional[str] = None + """A unique ID that can be used to identify and reference a firewall.""" + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the firewall was created. + """ + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets assigned to the firewall. + + Requires `droplet:read` scope. + """ + + inbound_rules: Optional[List[InboundRule]] = None + + name: Optional[str] = None + """A human-readable name for a firewall. + + The name must begin with an alphanumeric character. Subsequent characters must + either be alphanumeric characters, a period (.), or a dash (-). 
+ """ + + outbound_rules: Optional[List[OutboundRule]] = None + + pending_changes: Optional[List[PendingChange]] = None + """ + An array of objects each containing the fields "droplet_id", "removing", and + "status". It is provided to detail exactly which Droplets are having their + security policies updated. When empty, all changes have been successfully + applied. + """ + + status: Optional[Literal["waiting", "succeeded", "failed"]] = None + """A status string indicating the current state of the firewall. + + This can be "waiting", "succeeded", or "failed". + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/gpu_droplets/firewall_create_params.py b/src/gradientai/types/gpu_droplets/firewall_create_params.py new file mode 100644 index 00000000..b10ae98e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from .firewall_param import FirewallParam + +__all__ = ["FirewallCreateParams", "Body"] + + +class FirewallCreateParams(TypedDict, total=False): + body: Body + + +class Body(FirewallParam, total=False): + pass diff --git a/src/gradientai/types/gpu_droplets/firewall_create_response.py b/src/gradientai/types/gpu_droplets/firewall_create_response.py new file mode 100644 index 00000000..be30113a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallCreateResponse"] + + +class FirewallCreateResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewall_list_params.py b/src/gradientai/types/gpu_droplets/firewall_list_params.py new file mode 100644 index 00000000..155cc480 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["FirewallListParams"] + + +class FirewallListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/firewall_list_response.py b/src/gradientai/types/gpu_droplets/firewall_list_response.py new file mode 100644 index 00000000..ec0af688 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+
+from .firewall import Firewall
+from ..._models import BaseModel
+from ..shared.page_links import PageLinks
+from ..shared.meta_properties import MetaProperties
+
+__all__ = ["FirewallListResponse"]
+
+
+class FirewallListResponse(BaseModel):
+    meta: MetaProperties
+    """Information about the response itself."""
+
+    firewalls: Optional[List[Firewall]] = None
+
+    links: Optional[PageLinks] = None
diff --git a/src/gradientai/types/gpu_droplets/firewall_param.py b/src/gradientai/types/gpu_droplets/firewall_param.py
new file mode 100644
index 00000000..1be9cf6a
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewall_param.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["FirewallParam", "InboundRule", "OutboundRule"]
+
+
+class InboundRule(TypedDict, total=False):
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+    sources: Required[FirewallRuleTarget]
+    """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+    destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+
+class FirewallParam(TypedDict, total=False):
+    droplet_ids: Optional[Iterable[int]]
+    """An array containing the IDs of the Droplets assigned to the firewall.
+
+    Requires `droplet:read` scope.
+    """
+
+    inbound_rules: Optional[Iterable[InboundRule]]
+
+    name: str
+    """A human-readable name for a firewall.
+
+    The name must begin with an alphanumeric character. Subsequent characters must
+    either be alphanumeric characters, a period (.), or a dash (-).
+    """
+
+    outbound_rules: Optional[Iterable[OutboundRule]]
+
+    tags: Optional[List[str]]
+    """A flat array of tag names as strings to be applied to the resource.
+
+    Tag names must exist in order to be referenced in a request.
+
+    Requires `tag:create` and `tag:read` scopes.
+    """
diff --git a/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py
new file mode 100644
index 00000000..bb29a174
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
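To make the request shape concrete, here is a minimal sketch of a `FirewallParam` payload: one inbound rule admitting SSH from a private network and one outbound rule allowing all TCP traffic. The `addresses` key inside the rule targets is an assumption about the `FirewallRuleTarget` shared param (defined elsewhere in this patch), modeled on the public DigitalOcean firewall API:

from gradientai.types.gpu_droplets.firewall_param import FirewallParam

# "addresses" is an assumed FirewallRuleTarget field; only FirewallParam itself
# is defined in the file above.
firewall: FirewallParam = {
    "name": "web-firewall",
    "inbound_rules": [
        {"protocol": "tcp", "ports": "22", "sources": {"addresses": ["192.0.2.0/24"]}},
    ],
    "outbound_rules": [
        {"protocol": "tcp", "ports": "0", "destinations": {"addresses": ["0.0.0.0/0", "::/0"]}},
    ],
    "tags": ["web"],
}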
+ +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallRetrieveResponse"] + + +class FirewallRetrieveResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewall_update_params.py b/src/gradientai/types/gpu_droplets/firewall_update_params.py new file mode 100644 index 00000000..c2d0691d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_update_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .firewall_param import FirewallParam + +__all__ = ["FirewallUpdateParams"] + + +class FirewallUpdateParams(TypedDict, total=False): + firewall: Required[FirewallParam] diff --git a/src/gradientai/types/gpu_droplets/firewall_update_response.py b/src/gradientai/types/gpu_droplets/firewall_update_response.py new file mode 100644 index 00000000..cb8ff702 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallUpdateResponse"] + + +class FirewallUpdateResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewalls/__init__.py b/src/gradientai/types/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..6ba459d9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .tag_add_params import TagAddParams as TagAddParams +from .rule_add_params import RuleAddParams as RuleAddParams +from .tag_remove_params import TagRemoveParams as TagRemoveParams +from .droplet_add_params import DropletAddParams as DropletAddParams +from .rule_remove_params import RuleRemoveParams as RuleRemoveParams +from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py new file mode 100644 index 00000000..35a403a5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletAddParams"] + + +class DropletAddParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets to be assigned to the firewall.""" diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py new file mode 100644 index 00000000..5aea18e8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
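Two patterns in the files above are worth noting: `FirewallUpdateParams` marks the nested `firewall` key as `Required`, which suggests the update endpoint takes a complete firewall representation rather than a partial patch, while the `firewalls/` sub-resource params are thin wrappers around plain ID lists. A short sketch of both:

from gradientai.types.gpu_droplets.firewall_update_params import FirewallUpdateParams
from gradientai.types.gpu_droplets.firewalls.droplet_add_params import DropletAddParams

# The update payload nests a full FirewallParam under the required `firewall` key.
update: FirewallUpdateParams = {"firewall": {"name": "web-firewall"}}

# Assigning Droplets to an existing firewall only needs their IDs.
assign: DropletAddParams = {"droplet_ids": [8043964, 8043972]}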
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+__all__ = ["DropletRemoveParams"]
+
+
+class DropletRemoveParams(TypedDict, total=False):
+    droplet_ids: Required[Iterable[int]]
+    """An array containing the IDs of the Droplets to be removed from the firewall."""
diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py
new file mode 100644
index 00000000..1f49e55a
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["RuleAddParams", "InboundRule", "OutboundRule"]
+
+
+class RuleAddParams(TypedDict, total=False):
+    inbound_rules: Optional[Iterable[InboundRule]]
+
+    outbound_rules: Optional[Iterable[OutboundRule]]
+
+
+class InboundRule(TypedDict, total=False):
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+    sources: Required[FirewallRuleTarget]
+    """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+    destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
new file mode 100644
index 00000000..b6bb05df
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
@@ -0,0 +1,46 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+from ...shared_params.firewall_rule_target import FirewallRuleTarget
+
+__all__ = ["RuleRemoveParams", "InboundRule", "OutboundRule"]
+
+
+class RuleRemoveParams(TypedDict, total=False):
+    inbound_rules: Optional[Iterable[InboundRule]]
+
+    outbound_rules: Optional[Iterable[OutboundRule]]
+
+
+class InboundRule(TypedDict, total=False):
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
+
+    sources: Required[FirewallRuleTarget]
+    """An object specifying locations from which inbound traffic will be accepted."""
+
+
+class OutboundRule(TypedDict, total=False):
+    destinations: Required[FirewallRuleTarget]
+    """An object specifying locations to which outbound traffic will be allowed."""
+
+    ports: Required[str]
+    """
+    The ports on which traffic will be allowed specified as a string containing a
+    single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a
+    protocol. For ICMP rules this parameter will always return "0".
+    """
+
+    protocol: Required[Literal["tcp", "udp", "icmp"]]
+    """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`."""
diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py
new file mode 100644
index 00000000..63af7640
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["TagAddParams"]
+
+
+class TagAddParams(TypedDict, total=False):
+    tags: Required[Optional[List[str]]]
+    """A flat array of tag names as strings to be applied to the resource.
+
+    Tag names must exist in order to be referenced in a request.
+
+    Requires `tag:create` and `tag:read` scopes.
+    """
diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
new file mode 100644
index 00000000..91a3e382
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["TagRemoveParams"]
+
+
+class TagRemoveParams(TypedDict, total=False):
+    tags: Required[Optional[List[str]]]
+    """A flat array of tag names as strings to be applied to the resource.
+
+    Tag names must exist in order to be referenced in a request.
+
+    Requires `tag:create` and `tag:read` scopes.
+    """
diff --git a/src/gradientai/types/gpu_droplets/floating_ip.py b/src/gradientai/types/gpu_droplets/floating_ip.py
new file mode 100644
index 00000000..81c58753
--- /dev/null
+++ b/src/gradientai/types/gpu_droplets/floating_ip.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import TypeAlias
+
+from ..shared import region, droplet
+from ..._models import BaseModel
+
+__all__ = ["FloatingIP", "Droplet", "Region"]
+
+Droplet: TypeAlias = Union[droplet.Droplet, Optional[object]]
+
+
+class Region(region.Region):
+    pass
+
+
+class FloatingIP(BaseModel):
+    droplet: Optional[Droplet] = None
+    """The Droplet that the floating IP has been assigned to.
+
+    When you query a floating IP, if it is assigned to a Droplet, the entire Droplet
+    object will be returned. If it is not assigned, the value will be null.
+
+    Requires `droplet:read` scope.
+    """
+
+    ip: Optional[str] = None
+    """The public IP address of the floating IP.
It also serves as its identifier.""" + + locked: Optional[bool] = None + """ + A boolean value indicating whether or not the floating IP has pending actions + preventing new ones from being submitted. + """ + + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs. + + Requires `project:read` scope. + """ + + region: Optional[Region] = None + """The region that the floating IP is reserved to. + + When you query a floating IP, the entire region object will be returned. + """ diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/gradientai/types/gpu_droplets/floating_ip_create_params.py new file mode 100644 index 00000000..2adadc27 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +__all__ = ["FloatingIPCreateParams", "AssignToDroplet", "ReserveToRegion"] + + +class AssignToDroplet(TypedDict, total=False): + droplet_id: Required[int] + """The ID of the Droplet that the floating IP will be assigned to.""" + + +class ReserveToRegion(TypedDict, total=False): + region: Required[str] + """The slug identifier for the region the floating IP will be reserved to.""" + + project_id: str + """The UUID of the project to which the floating IP will be assigned.""" + + +FloatingIPCreateParams: TypeAlias = Union[AssignToDroplet, ReserveToRegion] diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/gradientai/types/gpu_droplets/floating_ip_create_response.py new file mode 100644 index 00000000..04668b84 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_create_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP +from ..shared.action_link import ActionLink + +__all__ = ["FloatingIPCreateResponse", "Links"] + + +class Links(BaseModel): + actions: Optional[List[ActionLink]] = None + + droplets: Optional[List[ActionLink]] = None + + +class FloatingIPCreateResponse(BaseModel): + floating_ip: Optional[FloatingIP] = None + + links: Optional[Links] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/gradientai/types/gpu_droplets/floating_ip_list_params.py new file mode 100644 index 00000000..2e054075 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["FloatingIPListParams"] + + +class FloatingIPListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/gradientai/types/gpu_droplets/floating_ip_list_response.py new file mode 100644 index 00000000..734011d2 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
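`FloatingIPCreateParams` is a union of two mutually exclusive request shapes, so a caller builds exactly one of the following forms (all values below are placeholders):

from gradientai.types.gpu_droplets.floating_ip_create_params import (
    AssignToDroplet,
    ReserveToRegion,
)

# Form 1: create the floating IP already assigned to a Droplet.
assign: AssignToDroplet = {"droplet_id": 8043964}

# Form 2: reserve the floating IP to a region, optionally under a project.
reserve: ReserveToRegion = {
    "region": "nyc3",
    "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",  # placeholder UUID
}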
+ +from typing import List, Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["FloatingIPListResponse"] + + +class FloatingIPListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + floating_ips: Optional[List[FloatingIP]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py new file mode 100644 index 00000000..b7ec77d4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP + +__all__ = ["FloatingIPRetrieveResponse"] + + +class FloatingIPRetrieveResponse(BaseModel): + floating_ip: Optional[FloatingIP] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/gradientai/types/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..a597418e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .action_create_params import ActionCreateParams as ActionCreateParams +from .action_list_response import ActionListResponse as ActionListResponse +from .action_create_response import ActionCreateResponse as ActionCreateResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py new file mode 100644 index 00000000..c84f5df7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionCreateParams", "FloatingIPActionUnassign", "FloatingIPActionAssign"] + + +class FloatingIPActionUnassign(TypedDict, total=False): + type: Required[Literal["assign", "unassign"]] + """The type of action to initiate for the floating IP.""" + + +class FloatingIPActionAssign(TypedDict, total=False): + droplet_id: Required[int] + """The ID of the Droplet that the floating IP will be assigned to.""" + + type: Required[Literal["assign", "unassign"]] + """The type of action to initiate for the floating IP.""" + + +ActionCreateParams: TypeAlias = Union[FloatingIPActionUnassign, FloatingIPActionAssign] diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py new file mode 100644 index 00000000..90acd8c9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
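Note that both action variants above declare the same `Literal["assign", "unassign"]` for the discriminating `type` field, so a type checker will not narrow the union on `type` alone; the variants differ only in whether `droplet_id` is present. A sketch of the two payloads:

from gradientai.types.gpu_droplets.floating_ips.action_create_params import (
    FloatingIPActionAssign,
    FloatingIPActionUnassign,
)

assign: FloatingIPActionAssign = {"type": "assign", "droplet_id": 8043964}
unassign: FloatingIPActionUnassign = {"type": "unassign"}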
+ +from typing import Optional + +from ...shared import action +from ...._models import BaseModel + +__all__ = ["ActionCreateResponse", "Action"] + + +class Action(action.Action): + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs.""" + + +class ActionCreateResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py new file mode 100644 index 00000000..2f4edac5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.action import Action +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py new file mode 100644 index 00000000..d94554be --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...shared import action +from ...._models import BaseModel + +__all__ = ["ActionRetrieveResponse", "Action"] + + +class Action(action.Action): + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs.""" + + +class ActionRetrieveResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule.py b/src/gradientai/types/gpu_droplets/forwarding_rule.py new file mode 100644 index 00000000..40a310ab --- /dev/null +++ b/src/gradientai/types/gpu_droplets/forwarding_rule.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ForwardingRule"] + + +class ForwardingRule(BaseModel): + entry_port: int + """ + An integer representing the port on which the load balancer instance will + listen. + """ + + entry_protocol: Literal["http", "https", "http2", "http3", "tcp", "udp"] + """The protocol used for traffic to the load balancer. + + The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If + you set the `entry_protocol` to `udp`, the `target_protocol` must be set to + `udp`. When using UDP, the load balancer requires that you set up a health check + with a port that uses TCP, HTTP, or HTTPS to work properly. + """ + + target_port: int + """ + An integer representing the port on the backend Droplets to which the load + balancer will send traffic. + """ + + target_protocol: Literal["http", "https", "http2", "tcp", "udp"] + """The protocol used for traffic from the load balancer to the backend Droplets. + + The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set + the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. 
When + using UDP, the load balancer requires that you set up a health check with a port + that uses TCP, HTTP, or HTTPS to work properly. + """ + + certificate_id: Optional[str] = None + """The ID of the TLS certificate used for SSL termination if enabled.""" + + tls_passthrough: Optional[bool] = None + """ + A boolean value indicating whether SSL encrypted traffic will be passed through + to the backend Droplets. + """ diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/gradientai/types/gpu_droplets/forwarding_rule_param.py new file mode 100644 index 00000000..70285bf6 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/forwarding_rule_param.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ForwardingRuleParam"] + + +class ForwardingRuleParam(TypedDict, total=False): + entry_port: Required[int] + """ + An integer representing the port on which the load balancer instance will + listen. + """ + + entry_protocol: Required[Literal["http", "https", "http2", "http3", "tcp", "udp"]] + """The protocol used for traffic to the load balancer. + + The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If + you set the `entry_protocol` to `udp`, the `target_protocol` must be set to + `udp`. When using UDP, the load balancer requires that you set up a health check + with a port that uses TCP, HTTP, or HTTPS to work properly. + """ + + target_port: Required[int] + """ + An integer representing the port on the backend Droplets to which the load + balancer will send traffic. + """ + + target_protocol: Required[Literal["http", "https", "http2", "tcp", "udp"]] + """The protocol used for traffic from the load balancer to the backend Droplets. + + The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set + the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. When + using UDP, the load balancer requires that you set up a health check with a port + that uses TCP, HTTP, or HTTPS to work properly. + """ + + certificate_id: str + """The ID of the TLS certificate used for SSL termination if enabled.""" + + tls_passthrough: bool + """ + A boolean value indicating whether SSL encrypted traffic will be passed through + to the backend Droplets. + """ diff --git a/src/gradientai/types/gpu_droplets/glb_settings.py b/src/gradientai/types/gpu_droplets/glb_settings.py new file mode 100644 index 00000000..9aa790d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/glb_settings.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["GlbSettings", "Cdn"] + + +class Cdn(BaseModel): + is_enabled: Optional[bool] = None + """A boolean flag to enable CDN caching.""" + + +class GlbSettings(BaseModel): + cdn: Optional[Cdn] = None + """An object specifying CDN configurations for a Global load balancer.""" + + failover_threshold: Optional[int] = None + """ + An integer value as a percentage to indicate failure threshold to decide how the + regional priorities will take effect. A value of `50` would indicate that the + Global load balancer will choose a lower priority region to forward traffic to + once this failure threshold has been reached for the higher priority region. 
+ """ + + region_priorities: Optional[Dict[str, int]] = None + """ + A map of region string to an integer priority value indicating preference for + which regional target a Global load balancer will forward traffic to. A lower + value indicates a higher priority. + """ + + target_port: Optional[int] = None + """ + An integer representing the port on the target backends which the load balancer + will forward traffic to. + """ + + target_protocol: Optional[Literal["http", "https", "http2"]] = None + """ + The protocol used for forwarding traffic from the load balancer to the target + backends. The possible values are `http`, `https` and `http2`. + """ diff --git a/src/gradientai/types/gpu_droplets/glb_settings_param.py b/src/gradientai/types/gpu_droplets/glb_settings_param.py new file mode 100644 index 00000000..f1b25c8b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/glb_settings_param.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict +from typing_extensions import Literal, TypedDict + +__all__ = ["GlbSettingsParam", "Cdn"] + + +class Cdn(TypedDict, total=False): + is_enabled: bool + """A boolean flag to enable CDN caching.""" + + +class GlbSettingsParam(TypedDict, total=False): + cdn: Cdn + """An object specifying CDN configurations for a Global load balancer.""" + + failover_threshold: int + """ + An integer value as a percentage to indicate failure threshold to decide how the + regional priorities will take effect. A value of `50` would indicate that the + Global load balancer will choose a lower priority region to forward traffic to + once this failure threshold has been reached for the higher priority region. + """ + + region_priorities: Dict[str, int] + """ + A map of region string to an integer priority value indicating preference for + which regional target a Global load balancer will forward traffic to. A lower + value indicates a higher priority. + """ + + target_port: int + """ + An integer representing the port on the target backends which the load balancer + will forward traffic to. + """ + + target_protocol: Literal["http", "https", "http2"] + """ + The protocol used for forwarding traffic from the load balancer to the target + backends. The possible values are `http`, `https` and `http2`. + """ diff --git a/src/gradientai/types/gpu_droplets/health_check.py b/src/gradientai/types/gpu_droplets/health_check.py new file mode 100644 index 00000000..db44d84e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/health_check.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["HealthCheck"] + + +class HealthCheck(BaseModel): + check_interval_seconds: Optional[int] = None + """The number of seconds between between two consecutive health checks.""" + + healthy_threshold: Optional[int] = None + """ + The number of times a health check must pass for a backend Droplet to be marked + "healthy" and be re-added to the pool. + """ + + path: Optional[str] = None + """ + The path on the backend Droplets to which the load balancer instance will send a + request. + """ + + port: Optional[int] = None + """ + An integer representing the port on the backend Droplets on which the health + check will attempt a connection. 
+ """ + + protocol: Optional[Literal["http", "https", "tcp"]] = None + """The protocol used for health checks sent to the backend Droplets. + + The possible values are `http`, `https`, or `tcp`. + """ + + response_timeout_seconds: Optional[int] = None + """ + The number of seconds the load balancer instance will wait for a response until + marking a health check as failed. + """ + + unhealthy_threshold: Optional[int] = None + """ + The number of times a health check must fail for a backend Droplet to be marked + "unhealthy" and be removed from the pool. + """ diff --git a/src/gradientai/types/gpu_droplets/health_check_param.py b/src/gradientai/types/gpu_droplets/health_check_param.py new file mode 100644 index 00000000..e840f818 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/health_check_param.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["HealthCheckParam"] + + +class HealthCheckParam(TypedDict, total=False): + check_interval_seconds: int + """The number of seconds between between two consecutive health checks.""" + + healthy_threshold: int + """ + The number of times a health check must pass for a backend Droplet to be marked + "healthy" and be re-added to the pool. + """ + + path: str + """ + The path on the backend Droplets to which the load balancer instance will send a + request. + """ + + port: int + """ + An integer representing the port on the backend Droplets on which the health + check will attempt a connection. + """ + + protocol: Literal["http", "https", "tcp"] + """The protocol used for health checks sent to the backend Droplets. + + The possible values are `http`, `https`, or `tcp`. + """ + + response_timeout_seconds: int + """ + The number of seconds the load balancer instance will wait for a response until + marking a health check as failed. + """ + + unhealthy_threshold: int + """ + The number of times a health check must fail for a backend Droplet to be marked + "unhealthy" and be removed from the pool. + """ diff --git a/src/gradientai/types/gpu_droplets/image_create_params.py b/src/gradientai/types/gpu_droplets/image_create_params.py new file mode 100644 index 00000000..efbd684c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_create_params.py @@ -0,0 +1,81 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageCreateParams"] + + +class ImageCreateParams(TypedDict, total=False): + description: str + """An optional free-form text field to describe an image.""" + + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + """The name of a custom image's distribution. + + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. + """ + + name: str + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. 
+ """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + url: str + """A URL from which the custom Linux virtual machine image may be retrieved. + + The image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It + may be compressed using gzip or bzip2 and must be smaller than 100 GB after + being decompressed. + """ diff --git a/src/gradientai/types/gpu_droplets/image_create_response.py b/src/gradientai/types/gpu_droplets/image_create_response.py new file mode 100644 index 00000000..87ebbb01 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageCreateResponse"] + + +class ImageCreateResponse(BaseModel): + image: Optional[Image] = None diff --git a/src/gradientai/types/gpu_droplets/image_list_params.py b/src/gradientai/types/gpu_droplets/image_list_params.py new file mode 100644 index 00000000..d8e90efa --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_list_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageListParams"] + + +class ImageListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + private: bool + """Used to filter only user images.""" + + tag_name: str + """Used to filter images by a specific tag.""" + + type: Literal["application", "distribution"] + """ + Filters results based on image type which can be either `application` or + `distribution`. + """ diff --git a/src/gradientai/types/gpu_droplets/image_list_response.py b/src/gradientai/types/gpu_droplets/image_list_response.py new file mode 100644 index 00000000..d4bb5697 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.image import Image +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["ImageListResponse"] + + +class ImageListResponse(BaseModel): + images: List[Image] + + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/image_retrieve_response.py b/src/gradientai/types/gpu_droplets/image_retrieve_response.py new file mode 100644 index 00000000..394dd384 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_retrieve_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageRetrieveResponse"] + + +class ImageRetrieveResponse(BaseModel): + image: Image diff --git a/src/gradientai/types/gpu_droplets/image_update_params.py b/src/gradientai/types/gpu_droplets/image_update_params.py new file mode 100644 index 00000000..2ff851f8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_update_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageUpdateParams"] + + +class ImageUpdateParams(TypedDict, total=False): + description: str + """An optional free-form text field to describe an image.""" + + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + """The name of a custom image's distribution. + + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. + """ + + name: str + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. + """ diff --git a/src/gradientai/types/gpu_droplets/image_update_response.py b/src/gradientai/types/gpu_droplets/image_update_response.py new file mode 100644 index 00000000..3d07f5ac --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_update_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageUpdateResponse"] + + +class ImageUpdateResponse(BaseModel): + image: Image diff --git a/src/gradientai/types/gpu_droplets/images/__init__.py b/src/gradientai/types/gpu_droplets/images/__init__.py new file mode 100644 index 00000000..7e78954c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .action_create_params import ActionCreateParams as ActionCreateParams +from .action_list_response import ActionListResponse as ActionListResponse diff --git a/src/gradientai/types/gpu_droplets/images/action_create_params.py b/src/gradientai/types/gpu_droplets/images/action_create_params.py new file mode 100644 index 00000000..a1b57d47 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/action_create_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionCreateParams", "ImageActionBase", "ImageActionTransfer"] + + +class ImageActionBase(TypedDict, total=False): + type: Required[Literal["convert", "transfer"]] + """The action to be taken on the image. 
Can be either `convert` or `transfer`.""" + + +class ImageActionTransfer(TypedDict, total=False): + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + type: Required[Literal["convert", "transfer"]] + """The action to be taken on the image. Can be either `convert` or `transfer`.""" + + +ActionCreateParams: TypeAlias = Union[ImageActionBase, ImageActionTransfer] diff --git a/src/gradientai/types/gpu_droplets/images/action_list_response.py b/src/gradientai/types/gpu_droplets/images/action_list_response.py new file mode 100644 index 00000000..2f4edac5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.action import Action +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/lb_firewall.py b/src/gradientai/types/gpu_droplets/lb_firewall.py new file mode 100644 index 00000000..aea1887c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/lb_firewall.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["LbFirewall"] + + +class LbFirewall(BaseModel): + allow: Optional[List[str]] = None + """ + the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ + + deny: Optional[List[str]] = None + """ + the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ diff --git a/src/gradientai/types/gpu_droplets/lb_firewall_param.py b/src/gradientai/types/gpu_droplets/lb_firewall_param.py new file mode 100644 index 00000000..6f1dcf10 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/lb_firewall_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["LbFirewallParam"] + + +class LbFirewallParam(TypedDict, total=False): + allow: List[str] + """ + the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ + + deny: List[str] + """ + the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ diff --git a/src/gradientai/types/gpu_droplets/load_balancer.py b/src/gradientai/types/gpu_droplets/load_balancer.py new file mode 100644 index 00000000..d0e7597a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
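The load balancer firewall's `allow`/`deny` entries use the `ip:` and `cidr:` string forms described in the docstrings above; a minimal sketch:

from gradientai.types.gpu_droplets.lb_firewall_param import LbFirewallParam

rules: LbFirewallParam = {
    "allow": ["cidr:192.0.2.0/24", "ip:198.51.100.7"],
    "deny": ["cidr:203.0.113.0/24"],
}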
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .domains import Domains +from ..._models import BaseModel +from .lb_firewall import LbFirewall +from .glb_settings import GlbSettings +from .health_check import HealthCheck +from ..shared.region import Region +from .forwarding_rule import ForwardingRule +from .sticky_sessions import StickySessions + +__all__ = ["LoadBalancer"] + + +class LoadBalancer(BaseModel): + forwarding_rules: List[ForwardingRule] + """An array of objects specifying the forwarding rules for a load balancer.""" + + id: Optional[str] = None + """A unique ID that can be used to identify and reference a load balancer.""" + + algorithm: Optional[Literal["round_robin", "least_connections"]] = None + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the load balancer was created. + """ + + disable_lets_encrypt_dns_records: Optional[bool] = None + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Optional[List[Domains]] = None + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: Optional[bool] = None + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: Optional[bool] = None + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: Optional[LbFirewall] = None + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: Optional[GlbSettings] = None + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: Optional[HealthCheck] = None + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: Optional[int] = None + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + ip: Optional[str] = None + """An attribute containing the public-facing IP address of the load balancer.""" + + ipv6: Optional[str] = None + """An attribute containing the public-facing IPv6 address of the load balancer.""" + + name: Optional[str] = None + """A human-readable name for a load balancer instance.""" + + network: Optional[Literal["EXTERNAL", "INTERNAL"]] = None + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Optional[Literal["IPV4", "DUALSTACK"]] = None + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: Optional[str] = None + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. 
If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: Optional[bool] = None + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Optional[Region] = None + """The region where the load balancer instance is located. + + When setting a region, the value should be the slug identifier for the region. + When you query a load balancer, an entire region object will be returned. + """ + + size: Optional[Literal["lb-small", "lb-medium", "lb-large"]] = None + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: Optional[int] = None + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + status: Optional[Literal["new", "active", "errored"]] = None + """A status string indicating the current state of the load balancer. + + This can be `new`, `active`, or `errored`. + """ + + sticky_sessions: Optional[StickySessions] = None + """An object specifying sticky sessions settings for the load balancer.""" + + tag: Optional[str] = None + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: Optional[List[str]] = None + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Optional[Literal["DEFAULT", "STRONG"]] = None + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Optional[Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]] = None + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: Optional[str] = None + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/gradientai/types/gpu_droplets/load_balancer_create_params.py new file mode 100644 index 00000000..a87d9148 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_create_params.py @@ -0,0 +1,335 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
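A purely illustrative sketch of reading the `LoadBalancer` model defined above, including the `size`/`size_unit` relationship it documents (`size_unit` supersedes `size` everywhere except AMS2, NYC2, and SFO1):

from gradientai.types.gpu_droplets.load_balancer import LoadBalancer

# Illustrative only: in practice the model is deserialized from a response.
lb = LoadBalancer(
    forwarding_rules=[
        {"entry_protocol": "https", "entry_port": 443, "target_protocol": "http", "target_port": 80}
    ],
    status="active",
    size_unit=3,
)

# Legacy `size` slugs map to fixed node counts per the docstring above.
SIZE_TO_NODES = {"lb-small": 1, "lb-medium": 3, "lb-large": 6}
nodes = lb.size_unit or SIZE_TO_NODES.get(lb.size or "")
print(lb.status, nodes)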
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .domains_param import DomainsParam +from .lb_firewall_param import LbFirewallParam +from .glb_settings_param import GlbSettingsParam +from .health_check_param import HealthCheckParam +from .forwarding_rule_param import ForwardingRuleParam +from .sticky_sessions_param import StickySessionsParam + +__all__ = ["LoadBalancerCreateParams", "AssignDropletsByID", "AssignDropletsByTag"] + + +class AssignDropletsByID(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + droplet_ids: Iterable[int] + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. 
+ """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +class AssignDropletsByTag(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. 
+ + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + tag: str + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +LoadBalancerCreateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/gradientai/types/gpu_droplets/load_balancer_create_response.py new file mode 100644 index 00000000..ed4f2211 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerCreateResponse"] + + +class LoadBalancerCreateResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/gradientai/types/gpu_droplets/load_balancer_list_params.py new file mode 100644 index 00000000..d0daff3f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["LoadBalancerListParams"] + + +class LoadBalancerListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/gradientai/types/gpu_droplets/load_balancer_list_response.py new file mode 100644 index 00000000..d5d0b4ac --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["LoadBalancerListResponse"] + + +class LoadBalancerListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + load_balancers: Optional[List[LoadBalancer]] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py new file mode 100644 index 00000000..779e9693 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerRetrieveResponse"] + + +class LoadBalancerRetrieveResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/gradientai/types/gpu_droplets/load_balancer_update_params.py new file mode 100644 index 00000000..9a1906cb --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_update_params.py @@ -0,0 +1,335 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
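+
+# A minimal usage sketch: these params mirror the create params, and
+# DigitalOcean documents the load balancer update endpoint as a PUT that
+# replaces the full configuration, so send the complete desired state rather
+# than a delta. Assumes a `gpu_droplets.load_balancers.update` method and an
+# `lb_id` path parameter, both inferred rather than shown in this file.
+#
+#     client.gpu_droplets.load_balancers.update(
+#         lb_id="<load-balancer-uuid>",
+#         name="renamed-lb-01",
+#         region="nyc3",
+#         forwarding_rules=existing_rules,  # the full rule set, not just changes
+#         size_unit=3,
+#         droplet_ids=[3164444, 3164445],
+#     )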
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .domains_param import DomainsParam +from .lb_firewall_param import LbFirewallParam +from .glb_settings_param import GlbSettingsParam +from .health_check_param import HealthCheckParam +from .forwarding_rule_param import ForwardingRuleParam +from .sticky_sessions_param import StickySessionsParam + +__all__ = ["LoadBalancerUpdateParams", "AssignDropletsByID", "AssignDropletsByTag"] + + +class AssignDropletsByID(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + droplet_ids: Iterable[int] + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. 
+ """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +class AssignDropletsByTag(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. 
+ + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + tag: str + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +LoadBalancerUpdateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/gradientai/types/gpu_droplets/load_balancer_update_response.py new file mode 100644 index 00000000..2b24b376 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerUpdateResponse"] + + +class LoadBalancerUpdateResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/gradientai/types/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..806a71be --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .droplet_add_params import DropletAddParams as DropletAddParams +from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams +from .forwarding_rule_add_params import ForwardingRuleAddParams as ForwardingRuleAddParams +from .forwarding_rule_remove_params import ForwardingRuleRemoveParams as ForwardingRuleRemoveParams diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py new file mode 100644 index 00000000..ee403f5f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletAddParams"] + + +class DropletAddParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py new file mode 100644 index 00000000..d48795e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletRemoveParams"] + + +class DropletRemoveParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py new file mode 100644 index 00000000..2cc6a2df --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
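+
+# A minimal usage sketch: appending forwarding rules to an existing load
+# balancer. Assumes a `gpu_droplets.load_balancers.forwarding_rules.add`
+# method inferred from this patch's module layout, with rule keys following
+# DigitalOcean's public forwarding-rule schema.
+#
+#     client.gpu_droplets.load_balancers.forwarding_rules.add(
+#         lb_id="<load-balancer-uuid>",
+#         forwarding_rules=[
+#             {
+#                 "entry_protocol": "tcp",
+#                 "entry_port": 3306,
+#                 "target_protocol": "tcp",
+#                 "target_port": 3306,
+#             }
+#         ],
+#     )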
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +from ..forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRuleAddParams"] + + +class ForwardingRuleAddParams(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py new file mode 100644 index 00000000..e5209543 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +from ..forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRuleRemoveParams"] + + +class ForwardingRuleRemoveParams(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/gpu_droplets/size_list_params.py b/src/gradientai/types/gpu_droplets/size_list_params.py new file mode 100644 index 00000000..5df85a9c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/size_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["SizeListParams"] + + +class SizeListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/size_list_response.py b/src/gradientai/types/gpu_droplets/size_list_response.py new file mode 100644 index 00000000..c0c600b4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/size_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.size import Size +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["SizeListResponse"] + + +class SizeListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + sizes: List[Size] + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/snapshot_list_params.py new file mode 100644 index 00000000..6d1b6f5b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
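+
+# A minimal usage sketch: listing snapshots with pagination, optionally
+# filtered to Droplet or volume snapshots. Assumes a
+# `gpu_droplets.snapshots.list` method inferred from the module layout.
+#
+#     client.gpu_droplets.snapshots.list(resource_type="volume", page=1, per_page=50)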
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["SnapshotListParams"] + + +class SnapshotListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + resource_type: Literal["droplet", "volume"] + """Used to filter snapshots by a resource type.""" diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/snapshot_list_response.py new file mode 100644 index 00000000..29b6ec3b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.snapshots import Snapshots +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["SnapshotListResponse"] + + +class SnapshotListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py new file mode 100644 index 00000000..38d84c7a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.snapshots import Snapshots + +__all__ = ["SnapshotRetrieveResponse"] + + +class SnapshotRetrieveResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions.py b/src/gradientai/types/gpu_droplets/sticky_sessions.py new file mode 100644 index 00000000..78debc07 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/sticky_sessions.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["StickySessions"] + + +class StickySessions(BaseModel): + cookie_name: Optional[str] = None + """The name of the cookie sent to the client. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + cookie_ttl_seconds: Optional[int] = None + """The number of seconds until the cookie set by the load balancer expires. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + type: Optional[Literal["cookies", "none"]] = None + """ + An attribute indicating how and if requests from a client will be persistently + served by the same backend Droplet. The possible values are `cookies` or `none`. + """ diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/gradientai/types/gpu_droplets/sticky_sessions_param.py new file mode 100644 index 00000000..acea4a4a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/sticky_sessions_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
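+
+# A minimal usage sketch: cookie-based sticky sessions passed as part of a
+# load balancer create or update call. `cookie_name` and `cookie_ttl_seconds`
+# only apply when `type` is "cookies"; with "none" they are omitted. The
+# cookie values below are illustrative.
+#
+#     sticky: StickySessionsParam = {
+#         "type": "cookies",
+#         "cookie_name": "DO-LB",
+#         "cookie_ttl_seconds": 300,
+#     }
+#     client.gpu_droplets.load_balancers.update(..., sticky_sessions=sticky)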
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["StickySessionsParam"] + + +class StickySessionsParam(TypedDict, total=False): + cookie_name: str + """The name of the cookie sent to the client. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + cookie_ttl_seconds: int + """The number of seconds until the cookie set by the load balancer expires. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + type: Literal["cookies", "none"] + """ + An attribute indicating how and if requests from a client will be persistently + served by the same backend Droplet. The possible values are `cookies` or `none`. + """ diff --git a/src/gradientai/types/gpu_droplets/volume_create_params.py b/src/gradientai/types/gpu_droplets/volume_create_params.py new file mode 100644 index 00000000..fc889801 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_create_params.py @@ -0,0 +1,153 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["VolumeCreateParams", "VolumesExt4", "VolumesXfs"] + + +class VolumesExt4(TypedDict, total=False): + name: Required[str] + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size_gigabytes: Required[int] + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + description: str + """An optional free-form text field to describe a block storage volume.""" + + filesystem_label: str + """The label applied to the filesystem. + + Labels for ext4 type filesystems may contain 16 characters while labels for xfs + type filesystems are limited to 12 characters. May only be used in conjunction + with filesystem_type. + """ + + filesystem_type: str + """The name of the filesystem type to be used on the volume. + + When provided, the volume will automatically be formatted to the specified + filesystem type. Currently, the available options are `ext4` and `xfs`. + Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, + Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. + Attaching pre-formatted volumes to other Droplets is not recommended. + """ + + snapshot_id: str + """The unique identifier for the volume snapshot from which to create the volume.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +class VolumesXfs(TypedDict, total=False): + name: Required[str] + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. 
+ """ + + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size_gigabytes: Required[int] + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + description: str + """An optional free-form text field to describe a block storage volume.""" + + filesystem_label: str + """The label applied to the filesystem. + + Labels for ext4 type filesystems may contain 16 characters while labels for xfs + type filesystems are limited to 12 characters. May only be used in conjunction + with filesystem_type. + """ + + filesystem_type: str + """The name of the filesystem type to be used on the volume. + + When provided, the volume will automatically be formatted to the specified + filesystem type. Currently, the available options are `ext4` and `xfs`. + Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, + Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. + Attaching pre-formatted volumes to other Droplets is not recommended. + """ + + snapshot_id: str + """The unique identifier for the volume snapshot from which to create the volume.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +VolumeCreateParams: TypeAlias = Union[VolumesExt4, VolumesXfs] diff --git a/src/gradientai/types/gpu_droplets/volume_create_response.py b/src/gradientai/types/gpu_droplets/volume_create_response.py new file mode 100644 index 00000000..1bca9965 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_create_response.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region + +__all__ = ["VolumeCreateResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. + """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. + When you query a block storage volume, the entire region object will be + returned. 
+ """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. + """ + + +class VolumeCreateResponse(BaseModel): + volume: Optional[Volume] = None diff --git a/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py new file mode 100644 index 00000000..26d173f0 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VolumeDeleteByNameParams"] + + +class VolumeDeleteByNameParams(TypedDict, total=False): + name: str + """The block storage volume's name.""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/gpu_droplets/volume_list_params.py b/src/gradientai/types/gpu_droplets/volume_list_params.py new file mode 100644 index 00000000..b4549651 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_list_params.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VolumeListParams"] + + +class VolumeListParams(TypedDict, total=False): + name: str + """The block storage volume's name.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/gpu_droplets/volume_list_response.py b/src/gradientai/types/gpu_droplets/volume_list_response.py new file mode 100644 index 00000000..69ff421a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_list_response.py @@ -0,0 +1,73 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["VolumeListResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. 
+ """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. + When you query a block storage volume, the entire region object will be + returned. + """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. + """ + + +class VolumeListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + volumes: List[Volume] + """Array of volumes.""" + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/gradientai/types/gpu_droplets/volume_retrieve_response.py new file mode 100644 index 00000000..3efe8de7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_retrieve_response.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region + +__all__ = ["VolumeRetrieveResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. + """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. + When you query a block storage volume, the entire region object will be + returned. + """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. 
+ """ + + +class VolumeRetrieveResponse(BaseModel): + volume: Optional[Volume] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/__init__.py b/src/gradientai/types/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..68d3d1e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/__init__.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .volume_action import VolumeAction as VolumeAction +from .action_list_params import ActionListParams as ActionListParams +from .action_list_response import ActionListResponse as ActionListResponse +from .snapshot_list_params import SnapshotListParams as SnapshotListParams +from .action_retrieve_params import ActionRetrieveParams as ActionRetrieveParams +from .snapshot_create_params import SnapshotCreateParams as SnapshotCreateParams +from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse +from .snapshot_create_response import SnapshotCreateResponse as SnapshotCreateResponse +from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse +from .action_initiate_by_id_params import ActionInitiateByIDParams as ActionInitiateByIDParams +from .action_initiate_by_id_response import ActionInitiateByIDResponse as ActionInitiateByIDResponse +from .action_initiate_by_name_params import ActionInitiateByNameParams as ActionInitiateByNameParams +from .action_initiate_by_name_response import ActionInitiateByNameResponse as ActionInitiateByNameResponse diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py new file mode 100644 index 00000000..6d41d463 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py @@ -0,0 +1,133 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionInitiateByIDParams", "VolumeActionPostAttach", "VolumeActionPostDetach", "VolumeActionPostResize"] + + +class VolumeActionPostAttach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. + """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +class VolumeActionPostDetach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. 
+ """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +class VolumeActionPostResize(TypedDict, total=False): + size_gigabytes: Required[int] + """The new size of the block storage volume in GiB (1024^3).""" + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +ActionInitiateByIDParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach, VolumeActionPostResize] diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py new file mode 100644 index 00000000..d8460f22 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionInitiateByIDResponse"] + + +class ActionInitiateByIDResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py new file mode 100644 index 00000000..d1a7d084 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py @@ -0,0 +1,97 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionInitiateByNameParams", "VolumeActionPostAttach", "VolumeActionPostDetach"] + + +class VolumeActionPostAttach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. + """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. 
+ """ + + +class VolumeActionPostDetach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. + """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +ActionInitiateByNameParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach] diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py new file mode 100644 index 00000000..9a935bdf --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionInitiateByNameResponse"] + + +class ActionInitiateByNameResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/gradientai/types/gpu_droplets/volumes/action_list_params.py new file mode 100644 index 00000000..dd873288 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ActionListParams"] + + +class ActionListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/gradientai/types/gpu_droplets/volumes/action_list_response.py new file mode 100644 index 00000000..35964633 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[VolumeAction]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py new file mode 100644 index 00000000..93ab443f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
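+
+# A minimal usage sketch: fetching a single in-flight volume action. Assumes a
+# `gpu_droplets.volumes.actions.retrieve` method and an `action_id` path
+# parameter, both inferred from the module layout rather than shown here.
+#
+#     client.gpu_droplets.volumes.actions.retrieve(
+#         action_id=123456,
+#         volume_id="<volume-uuid>",
+#     )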
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ActionRetrieveParams"] + + +class ActionRetrieveParams(TypedDict, total=False): + volume_id: Required[str] + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py new file mode 100644 index 00000000..cd47f37e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionRetrieveResponse"] + + +class ActionRetrieveResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py new file mode 100644 index 00000000..8cce4a59 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["SnapshotCreateParams"] + + +class SnapshotCreateParams(TypedDict, total=False): + name: Required[str] + """A human-readable name for the volume snapshot.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py new file mode 100644 index 00000000..41701795 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots + +__all__ = ["SnapshotCreateResponse"] + + +class SnapshotCreateResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py new file mode 100644 index 00000000..65221a79 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["SnapshotListParams"] + + +class SnapshotListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py new file mode 100644 index 00000000..25d91ed2 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
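+
+# A minimal usage sketch: reading a paginated snapshot listing. Assumes a
+# `gpu_droplets.volumes.snapshots.list` method inferred from the module
+# layout; `snapshots` and `links` are optional, so guard before iterating.
+#
+#     resp = client.gpu_droplets.volumes.snapshots.list(volume_id="<volume-uuid>")
+#     for snapshot in resp.snapshots or []:
+#         print(snapshot.id, snapshot.name)
+#     # `resp.meta` describes the result set; `resp.links` points at other pages.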
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["SnapshotListResponse"] + + +class SnapshotListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py new file mode 100644 index 00000000..3defa47d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots + +__all__ = ["SnapshotRetrieveResponse"] + + +class SnapshotRetrieveResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/volume_action.py b/src/gradientai/types/gpu_droplets/volumes/volume_action.py new file mode 100644 index 00000000..e1c01f6c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/volume_action.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...shared.action import Action + +__all__ = ["VolumeAction"] + + +class VolumeAction(Action): + resource_id: Optional[int] = None # type: ignore + + type: Optional[str] = None # type: ignore + """This is the type of action that the object represents. + + For example, this could be "attach_volume" to represent the state of a volume + attach action. 
+ """ diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py index 16cc23c9..10edfbbe 100644 --- a/src/gradientai/types/inference/api_key_create_params.py +++ b/src/gradientai/types/inference/api_key_create_params.py @@ -9,3 +9,4 @@ class APIKeyCreateParams(TypedDict, total=False): name: str + """A human friendly name to identify the key""" diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py index 654e9f1e..f2469e43 100644 --- a/src/gradientai/types/inference/api_key_create_response.py +++ b/src/gradientai/types/inference/api_key_create_response.py @@ -10,3 +10,4 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py index 4d81d047..89102258 100644 --- a/src/gradientai/types/inference/api_key_delete_response.py +++ b/src/gradientai/types/inference/api_key_delete_response.py @@ -10,3 +10,4 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py index 11da9398..1f8f96b7 100644 --- a/src/gradientai/types/inference/api_key_list_params.py +++ b/src/gradientai/types/inference/api_key_list_params.py @@ -9,7 +9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 3e937950..7c474873 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -12,7 +12,10 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py index 23c1c0b9..7f79240a 100644 --- a/src/gradientai/types/inference/api_key_update_params.py +++ b/src/gradientai/types/inference/api_key_update_params.py @@ -11,5 +11,7 @@ class APIKeyUpdateParams(TypedDict, total=False): body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name""" diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py index 44a316dc..c7ce5f0a 100644 --- a/src/gradientai/types/inference/api_key_update_regenerate_response.py +++ b/src/gradientai/types/inference/api_key_update_regenerate_response.py @@ -10,3 +10,4 @@ class APIKeyUpdateRegenerateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py index 3671addf..1b7f92ef 100644 --- a/src/gradientai/types/inference/api_key_update_response.py +++ b/src/gradientai/types/inference/api_key_update_response.py @@ -10,3 +10,4 @@ class 
APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py index bf354a47..3da1c70a 100644 --- a/src/gradientai/types/inference/api_model_api_key_info.py +++ b/src/gradientai/types/inference/api_model_api_key_info.py @@ -10,13 +10,18 @@ class APIModelAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """Created by""" deleted_at: Optional[datetime] = None + """Deleted date""" name: Optional[str] = None + """Name""" secret_key: Optional[str] = None uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index acf52e30..9ecd777d 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -48,14 +48,18 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): """Tags to organize your knowledge base.""" vpc_uuid: str + """The VPC to deploy the knowledge base database in""" class Datasource(TypedDict, total=False): aws_data_source: AwsDataSourceParam + """AWS S3 Data Source""" bucket_name: str + """Deprecated, moved to data_source_details""" bucket_region: str + """Deprecated, moved to data_source_details""" file_upload_data_source: APIFileUploadDataSourceParam """File to upload as data source for knowledge base.""" @@ -63,5 +67,7 @@ class Datasource(TypedDict, total=False): item_path: str spaces_data_source: APISpacesDataSourceParam + """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py index cc2d8b9f..6d846fa5 100644 --- a/src/gradientai/types/knowledge_base_create_response.py +++ b/src/gradientai/types/knowledge_base_create_response.py @@ -10,3 +10,4 @@ class KnowledgeBaseCreateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py index 6401e25a..b0605a20 100644 --- a/src/gradientai/types/knowledge_base_delete_response.py +++ b/src/gradientai/types/knowledge_base_delete_response.py @@ -9,3 +9,4 @@ class KnowledgeBaseDeleteResponse(BaseModel): uuid: Optional[str] = None + """The id of the deleted knowledge base""" diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py index dcf9a0ec..b2c0eb31 100644 --- a/src/gradientai/types/knowledge_base_list_params.py +++ b/src/gradientai/types/knowledge_base_list_params.py @@ -9,7 +9,7 @@ class KnowledgeBaseListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index e8998b51..08227316 100644 --- a/src/gradientai/types/knowledge_base_list_response.py +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -12,7 +12,10 @@ class KnowledgeBaseListResponse(BaseModel): knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """The knowledge bases""" links: Optional[APILinks] = None + """Links to other 
pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py index 5a3b5f2c..55994f70 100644 --- a/src/gradientai/types/knowledge_base_retrieve_response.py +++ b/src/gradientai/types/knowledge_base_retrieve_response.py @@ -28,3 +28,4 @@ class KnowledgeBaseRetrieveResponse(BaseModel): ] = None knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py index 297c79de..7a86b40c 100644 --- a/src/gradientai/types/knowledge_base_update_params.py +++ b/src/gradientai/types/knowledge_base_update_params.py @@ -12,16 +12,19 @@ class KnowledgeBaseUpdateParams(TypedDict, total=False): database_id: str - """the id of the DigitalOcean database this knowledge base will use, optiona.""" + """The id of the DigitalOcean database this knowledge base will use, optiona.""" embedding_model_uuid: str """Identifier for the foundation model.""" name: str + """Knowledge base name""" project_id: str + """The id of the DigitalOcean project this knowledge base will belong to""" tags: List[str] """Tags to organize your knowledge base.""" body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py index f3ba2c32..0840622c 100644 --- a/src/gradientai/types/knowledge_base_update_response.py +++ b/src/gradientai/types/knowledge_base_update_response.py @@ -10,3 +10,4 @@ class KnowledgeBaseUpdateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py index 1dcc9639..a1c23e09 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py @@ -9,7 +9,10 @@ class APIFileUploadDataSource(BaseModel): original_file_name: Optional[str] = None + """The original file name""" size_in_bytes: Optional[str] = None + """The size of the file in bytes""" stored_object_key: Optional[str] = None + """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py index 37221059..562f8a34 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py @@ -9,7 +9,10 @@ class APIFileUploadDataSourceParam(TypedDict, total=False): original_file_name: str + """The original file name""" size_in_bytes: str + """The size of the file in bytes""" stored_object_key: str + """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py index 2449e9fd..151b29de 100644 --- a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py @@ -11,24 +11,34 @@ class APIIndexedDataSource(BaseModel): completed_at: Optional[datetime] = None + """Timestamp when data source completed indexing""" data_source_uuid: 
Optional[str] = None + """Uuid of the indexed data source""" error_details: Optional[str] = None + """A detailed error description""" error_msg: Optional[str] = None + """A string code providing a hint about which part of the system experienced an error""" failed_item_count: Optional[str] = None + """Total count of files that have failed""" indexed_file_count: Optional[str] = None + """Total count of files that have been indexed""" indexed_item_count: Optional[str] = None + """Total count of files that have been indexed""" removed_item_count: Optional[str] = None + """Total count of files that have been removed""" skipped_item_count: Optional[str] = None + """Total count of files that have been skipped""" started_at: Optional[datetime] = None + """Timestamp when data source started indexing""" status: Optional[ Literal[ @@ -42,7 +52,10 @@ class APIIndexedDataSource(BaseModel): ] = None total_bytes: Optional[str] = None + """Total size of files in data source in bytes""" total_bytes_indexed: Optional[str] = None + """Total size of files in data source in bytes that have been indexed""" total_file_count: Optional[str] = None + """Total file count in the data source""" diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py index 573a7c4e..240fd709 100644 --- a/src/gradientai/types/knowledge_bases/api_indexing_job.py +++ b/src/gradientai/types/knowledge_bases/api_indexing_job.py @@ -11,14 +11,17 @@ class APIIndexingJob(BaseModel): completed_datasources: Optional[int] = None + """Number of datasources that completed indexing""" created_at: Optional[datetime] = None + """Creation date / time""" data_source_uuids: Optional[List[str]] = None finished_at: Optional[datetime] = None knowledge_base_uuid: Optional[str] = None + """Knowledge base id""" phase: Optional[ Literal[ @@ -47,9 +50,13 @@ class APIIndexingJob(BaseModel): ] = None tokens: Optional[int] = None + """Number of tokens""" total_datasources: Optional[int] = None + """Number of datasources being indexed""" updated_at: Optional[datetime] = None + """Last modified""" uuid: Optional[str] = None + """Unique id""" diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index 202e4202..a4d695d2 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -15,34 +15,46 @@ class AwsDataSource(BaseModel): bucket_name: Optional[str] = None + """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None + """Region of bucket""" class APIKnowledgeBaseDataSource(BaseModel): aws_data_source: Optional[AwsDataSource] = None + """AWS S3 Data Source for Display""" bucket_name: Optional[str] = None + """Name of storage bucket - Deprecated, moved to data_source_details""" created_at: Optional[datetime] = None + """Creation date / time""" file_upload_data_source: Optional[APIFileUploadDataSource] = None """File to upload as data source for knowledge base.""" item_path: Optional[str] = None + """Path of folder or object in bucket - Deprecated, moved to data_source_details""" last_datasource_indexing_job: Optional[APIIndexedDataSource] = None last_indexing_job: Optional[APIIndexingJob] = None + """IndexingJob description""" region: Optional[str] = None + """Region code - Deprecated, moved to data_source_details""" spaces_data_source: Optional[APISpacesDataSource] = None +
"""Spaces Bucket Data Source""" updated_at: Optional[datetime] = None + """Last modified""" uuid: Optional[str] = None + """Unique id of knowledge base""" web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py index f3a0421a..02aa479a 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py @@ -9,7 +9,9 @@ class APISpacesDataSource(BaseModel): bucket_name: Optional[str] = None + """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None + """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py index b7f2f657..5eaeb0ad 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py @@ -9,7 +9,9 @@ class APISpacesDataSourceParam(TypedDict, total=False): bucket_name: str + """Spaces bucket name""" item_path: str region: str + """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py index 93d49228..912e3e29 100644 --- a/src/gradientai/types/knowledge_bases/aws_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/aws_data_source_param.py @@ -9,11 +9,15 @@ class AwsDataSourceParam(TypedDict, total=False): bucket_name: str + """Spaces bucket name""" item_path: str key_id: str + """The AWS Key ID""" region: str + """Region of bucket""" secret_key: str + """The AWS Secret Key""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py index 22bd76e7..ac3aa93c 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -14,9 +14,13 @@ class DataSourceCreateParams(TypedDict, total=False): aws_data_source: AwsDataSourceParam + """AWS S3 Data Source""" body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] + """Knowledge base id""" spaces_data_source: APISpacesDataSourceParam + """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py index 1035d3f4..76ec88e2 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_response.py @@ -10,3 +10,4 @@ class DataSourceCreateResponse(BaseModel): knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None + """Data Source configuration for Knowledge Bases""" diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py index 53954d7f..eaad72ff 100644 --- a/src/gradientai/types/knowledge_bases/data_source_delete_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_delete_response.py @@ -9,5 +9,7 @@ class DataSourceDeleteResponse(BaseModel): data_source_uuid: Optional[str] = None + """Data source id""" knowledge_base_uuid: Optional[str] = 
None + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py index e3ed5e3c..089eb291 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_params.py @@ -9,7 +9,7 @@ class DataSourceListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py index 2e5fc517..f05a49bc 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -12,7 +12,10 @@ class DataSourceListResponse(BaseModel): knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None + """The data sources""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py index 04838472..d92c5790 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py @@ -10,5 +10,10 @@ class IndexingJobCreateParams(TypedDict, total=False): data_source_uuids: List[str] + """ + List of data source ids to index, if none are provided, all data sources will be + indexed + """ knowledge_base_uuid: str + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py index 835ec60d..685f40ef 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -10,3 +10,4 @@ class IndexingJobCreateResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py index 90206aba..c9ac560e 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py @@ -9,7 +9,7 @@ class IndexingJobListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py index deea4562..371f51bb 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -12,7 +12,10 @@ class IndexingJobListResponse(BaseModel): jobs: Optional[List[APIIndexingJob]] = None + """The indexing jobs""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 6034bdf1..2d6be855 100644 --- 
a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -10,3 +10,4 @@ class IndexingJobRetrieveResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index ae4b394f..9fd41764 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -10,3 +10,4 @@ class IndexingJobUpdateCancelResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 47651759..5915bdd1 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,18 +1,28 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List +from typing_extensions import Literal from .._models import BaseModel -from .api_model import APIModel -from .shared.api_meta import APIMeta -from .shared.api_links import APILinks -__all__ = ["ModelListResponse"] +__all__ = ["ModelListResponse", "Data"] -class ModelListResponse(BaseModel): - links: Optional[APILinks] = None +class Data(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" - meta: Optional[APIMeta] = None + owned_by: str + """The organization that owns the model.""" + + +class ModelListResponse(BaseModel): + data: List[Data] - models: Optional[List[APIModel]] = None + object: Literal["list"] diff --git a/src/gradientai/types/model_retrieve_response.py b/src/gradientai/types/model_retrieve_response.py new file mode 100644 index 00000000..dd5de863 --- /dev/null +++ b/src/gradientai/types/model_retrieve_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
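# Illustrative sketch: ModelListResponse (reshaped above) now mirrors an
# OpenAI-style list payload, i.e. {"object": "list", "data": [...]}. A minimal
# construction with hypothetical values, assuming these models accept keyword
# initialization like the pydantic-based models Stainless generates:
from gradientai.types.model_list_response import Data, ModelListResponse

resp = ModelListResponse(
    object="list",
    data=[
        Data(
            id="llama3-8b-instruct",  # hypothetical model identifier
            created=1686935002,       # Unix timestamp in seconds
            object="model",
            owned_by="digitalocean",  # hypothetical owner
        )
    ],
)
assert [m.id for m in resp.data] == ["llama3-8b-instruct"]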
+ +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ModelRetrieveResponse"] + + +class ModelRetrieveResponse(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py index b624121f..c9fd6e85 100644 --- a/src/gradientai/types/models/providers/anthropic_create_params.py +++ b/src/gradientai/types/models/providers/anthropic_create_params.py @@ -9,5 +9,7 @@ class AnthropicCreateParams(TypedDict, total=False): api_key: str + """Anthropic API key""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py index f0b8d2d1..0fbe50bc 100644 --- a/src/gradientai/types/models/providers/anthropic_create_response.py +++ b/src/gradientai/types/models/providers/anthropic_create_response.py @@ -10,3 +10,4 @@ class AnthropicCreateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py index a3842bbc..b4fdd978 100644 --- a/src/gradientai/types/models/providers/anthropic_delete_response.py +++ b/src/gradientai/types/models/providers/anthropic_delete_response.py @@ -10,3 +10,4 @@ class AnthropicDeleteResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py b/src/gradientai/types/models/providers/anthropic_list_agents_params.py index 1a5b8229..b3308b69 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_params.py @@ -9,7 +9,7 @@ class AnthropicListAgentsParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py index 6816f0db..a1525275 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_response.py @@ -15,8 +15,10 @@ class AnthropicListAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py index de8ce520..ae1cca58 100644 --- a/src/gradientai/types/models/providers/anthropic_list_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_params.py @@ -9,7 +9,7 @@ class AnthropicListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git 
a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py index 77999f5b..24d6547a 100644 --- a/src/gradientai/types/models/providers/anthropic_list_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_response.py @@ -12,7 +12,10 @@ class AnthropicListResponse(BaseModel): api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py index 7083b75f..61324b7d 100644 --- a/src/gradientai/types/models/providers/anthropic_retrieve_response.py +++ b/src/gradientai/types/models/providers/anthropic_retrieve_response.py @@ -10,3 +10,4 @@ class AnthropicRetrieveResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py index 7bb03045..865dc29c 100644 --- a/src/gradientai/types/models/providers/anthropic_update_params.py +++ b/src/gradientai/types/models/providers/anthropic_update_params.py @@ -11,7 +11,10 @@ class AnthropicUpdateParams(TypedDict, total=False): api_key: str + """Anthropic API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py index d3b2911b..3a6daaea 100644 --- a/src/gradientai/types/models/providers/anthropic_update_response.py +++ b/src/gradientai/types/models/providers/anthropic_update_response.py @@ -10,3 +10,4 @@ class AnthropicUpdateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py index da655d75..8ed7f571 100644 --- a/src/gradientai/types/models/providers/openai_create_params.py +++ b/src/gradientai/types/models/providers/openai_create_params.py @@ -9,5 +9,7 @@ class OpenAICreateParams(TypedDict, total=False): api_key: str + """OpenAI API key""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py index 4908a91a..b2e94766 100644 --- a/src/gradientai/types/models/providers/openai_create_response.py +++ b/src/gradientai/types/models/providers/openai_create_response.py @@ -10,3 +10,4 @@ class OpenAICreateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py index 080a251f..e59c89fe 100644 --- a/src/gradientai/types/models/providers/openai_delete_response.py +++ b/src/gradientai/types/models/providers/openai_delete_response.py @@ -10,3 +10,4 @@ class OpenAIDeleteResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_list_params.py 
b/src/gradientai/types/models/providers/openai_list_params.py index e5b86b8d..5677eeaf 100644 --- a/src/gradientai/types/models/providers/openai_list_params.py +++ b/src/gradientai/types/models/providers/openai_list_params.py @@ -9,7 +9,7 @@ class OpenAIListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py index edbd9fb4..698cd11e 100644 --- a/src/gradientai/types/models/providers/openai_list_response.py +++ b/src/gradientai/types/models/providers/openai_list_response.py @@ -12,7 +12,10 @@ class OpenAIListResponse(BaseModel): api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py index 8a41eaf9..2db6d7a1 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py @@ -9,7 +9,7 @@ class OpenAIRetrieveAgentsParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py index b3166636..717a56cd 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py @@ -15,8 +15,10 @@ class OpenAIRetrieveAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py index ef23b966..0f382073 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_response.py @@ -10,3 +10,4 @@ class OpenAIRetrieveResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py index ab5d02cf..9b99495e 100644 --- a/src/gradientai/types/models/providers/openai_update_params.py +++ b/src/gradientai/types/models/providers/openai_update_params.py @@ -11,7 +11,10 @@ class OpenAIUpdateParams(TypedDict, total=False): api_key: str + """OpenAI API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_update_response.py b/src/gradientai/types/models/providers/openai_update_response.py index 9bb80518..ec7a1c94 100644 --- a/src/gradientai/types/models/providers/openai_update_response.py +++ b/src/gradientai/types/models/providers/openai_update_response.py @@ -10,3 +10,4 @@ class OpenAIUpdateResponse(BaseModel): api_key_info: 
Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py index 1db0ad50..4fef37b3 100644 --- a/src/gradientai/types/region_list_params.py +++ b/src/gradientai/types/region_list_params.py @@ -8,8 +8,8 @@ class RegionListParams(TypedDict, total=False): - serves_batch: bool - """include datacenters that are capable of running batch jobs.""" + page: int + """Which 'page' of paginated results to return.""" - serves_inference: bool - """include datacenters that serve inference.""" + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py index 0f955b36..f1bf4c69 100644 --- a/src/gradientai/types/region_list_response.py +++ b/src/gradientai/types/region_list_response.py @@ -3,21 +3,17 @@ from typing import List, Optional from .._models import BaseModel +from .shared.region import Region +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties -__all__ = ["RegionListResponse", "Region"] +__all__ = ["RegionListResponse"] -class Region(BaseModel): - inference_url: Optional[str] = None - - region: Optional[str] = None - - serves_batch: Optional[bool] = None - - serves_inference: Optional[bool] = None - - stream_inference_url: Optional[str] = None +class RegionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + regions: List[Region] -class RegionListResponse(BaseModel): - regions: Optional[List[Region]] = None + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py index 9fdd7605..6d90845f 100644 --- a/src/gradientai/types/shared/__init__.py +++ b/src/gradientai/types/shared/__init__.py @@ -1,6 +1,29 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
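# Illustrative sketch: RegionListParams is now plain page/per_page pagination,
# so walking every region is a simple loop. `client.regions.list` is a
# hypothetical resource method assumed to return the RegionListResponse above:
def iter_regions(client, per_page: int = 50):
    page = 1
    while True:
        resp = client.regions.list(page=page, per_page=per_page)
        yield from resp.regions
        if len(resp.regions) < per_page:  # a short page means no further results
            return
        page += 1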
+from .size import Size as Size +from .image import Image as Image +from .action import Action as Action +from .kernel import Kernel as Kernel +from .region import Region as Region +from .droplet import Droplet as Droplet from .api_meta import APIMeta as APIMeta +from .gpu_info import GPUInfo as GPUInfo from .api_links import APILinks as APILinks +from .disk_info import DiskInfo as DiskInfo +from .snapshots import Snapshots as Snapshots +from .network_v4 import NetworkV4 as NetworkV4 +from .network_v6 import NetworkV6 as NetworkV6 +from .page_links import PageLinks as PageLinks +from .action_link import ActionLink as ActionLink +from .vpc_peering import VpcPeering as VpcPeering +from .subscription import Subscription as Subscription +from .forward_links import ForwardLinks as ForwardLinks +from .backward_links import BackwardLinks as BackwardLinks +from .meta_properties import MetaProperties as MetaProperties +from .completion_usage import CompletionUsage as CompletionUsage +from .garbage_collection import GarbageCollection as GarbageCollection +from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk +from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase +from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/shared/action.py b/src/gradientai/types/shared/action.py new file mode 100644 index 00000000..2b9fbf4e --- /dev/null +++ b/src/gradientai/types/shared/action.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from .region import Region +from ..._models import BaseModel + +__all__ = ["Action"] + + +class Action(BaseModel): + id: Optional[int] = None + """A unique numeric ID that can be used to identify and reference an action.""" + + completed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the action was completed. + """ + + region: Optional[Region] = None + + region_slug: Optional[str] = None + """A human-readable string that is used as a unique identifier for each region.""" + + resource_id: Optional[int] = None + """A unique identifier for the resource that the action is associated with.""" + + resource_type: Optional[str] = None + """The type of resource that the action is associated with.""" + + started_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the action was initiated. + """ + + status: Optional[Literal["in-progress", "completed", "errored"]] = None + """The current status of the action. + + This can be "in-progress", "completed", or "errored". + """ + + type: Optional[str] = None + """This is the type of action that the object represents. + + For example, this could be "transfer" to represent the state of an image + transfer action. + """ diff --git a/src/gradientai/types/shared/action_link.py b/src/gradientai/types/shared/action_link.py new file mode 100644 index 00000000..78aec9ff --- /dev/null +++ b/src/gradientai/types/shared/action_link.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
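# Illustrative sketch: Action.status (defined above) is a three-state Literal,
# so a poll loop only needs to wait while an action is "in-progress". The
# `fetch` callable is a hypothetical stand-in for re-fetching the action by id:
import time
from typing import Callable

from gradientai.types.shared.action import Action

def wait_for_action(fetch: Callable[[], Action], interval: float = 2.0) -> Action:
    while True:
        action = fetch()
        if action.status != "in-progress":
            return action  # either "completed" or "errored"
        time.sleep(interval)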
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ActionLink"] + + +class ActionLink(BaseModel): + id: Optional[int] = None + """A unique numeric ID that can be used to identify and reference an action.""" + + href: Optional[str] = None + """A URL that can be used to access the action.""" + + rel: Optional[str] = None + """A string specifying the type of the related action.""" diff --git a/src/gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py index b37113f0..24b19cfe 100644 --- a/src/gradientai/types/shared/api_links.py +++ b/src/gradientai/types/shared/api_links.py @@ -9,13 +9,18 @@ class Pages(BaseModel): first: Optional[str] = None + """First page""" last: Optional[str] = None + """Last page""" next: Optional[str] = None + """Next page""" previous: Optional[str] = None + """Previous page""" class APILinks(BaseModel): pages: Optional[Pages] = None + """Information about how to reach other pages""" diff --git a/src/gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py index 9191812c..dc267527 100644 --- a/src/gradientai/types/shared/api_meta.py +++ b/src/gradientai/types/shared/api_meta.py @@ -9,7 +9,10 @@ class APIMeta(BaseModel): page: Optional[int] = None + """The current page""" pages: Optional[int] = None + """Total number of pages""" total: Optional[int] = None + """Total amount of items over all pages""" diff --git a/src/gradientai/types/shared/backward_links.py b/src/gradientai/types/shared/backward_links.py new file mode 100644 index 00000000..502fefef --- /dev/null +++ b/src/gradientai/types/shared/backward_links.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["BackwardLinks"] + + +class BackwardLinks(BaseModel): + first: Optional[str] = None + """URI of the first page of the results.""" + + prev: Optional[str] = None + """URI of the previous page of the results.""" diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py index 4d45ef8d..4dd587f9 100644 --- a/src/gradientai/types/shared/chat_completion_chunk.py +++ b/src/gradientai/types/shared/chat_completion_chunk.py @@ -4,9 +4,43 @@ from typing_extensions import Literal from ..._models import BaseModel +from .completion_usage import CompletionUsage from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] +__all__ = [ + "ChatCompletionChunk", + "Choice", + "ChoiceDelta", + "ChoiceDeltaToolCall", + "ChoiceDeltaToolCallFunction", + "ChoiceLogprobs", +] + + +class ChoiceDeltaToolCallFunction(BaseModel): + arguments: Optional[str] = None + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Optional[str] = None + """The name of the function to call.""" + + +class ChoiceDeltaToolCall(BaseModel): + index: int + + id: Optional[str] = None + """The ID of the tool call.""" + + function: Optional[ChoiceDeltaToolCallFunction] = None + """A chunk of a function that the model called.""" + + type: Optional[Literal["function"]] = None + """The type of the tool. 
Currently, only `function` is supported.""" class ChoiceDelta(BaseModel): @@ -19,6 +53,8 @@ class ChoiceDelta(BaseModel): role: Optional[Literal["developer", "user", "assistant"]] = None """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceDeltaToolCall]] = None + class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None @@ -32,12 +68,12 @@ class Choice(BaseModel): delta: ChoiceDelta """A chat completion delta generated by streamed model responses.""" - finish_reason: Optional[Literal["stop", "length"]] = None + finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached + was reached, or `tool_calls` if the model called a tool. """ index: int @@ -47,17 +83,6 @@ class Choice(BaseModel): """Log probability information for the choice.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class ChatCompletionChunk(BaseModel): id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" @@ -81,7 +106,7 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """ An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. When present, it diff --git a/src/gradientai/types/shared/completion_usage.py b/src/gradientai/types/shared/completion_usage.py new file mode 100644 index 00000000..a2012eef --- /dev/null +++ b/src/gradientai/types/shared/completion_usage.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["CompletionUsage"] + + +class CompletionUsage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" diff --git a/src/gradientai/types/shared/disk_info.py b/src/gradientai/types/shared/disk_info.py new file mode 100644 index 00000000..3c5c4911 --- /dev/null +++ b/src/gradientai/types/shared/disk_info.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["DiskInfo", "Size"] + + +class Size(BaseModel): + amount: Optional[int] = None + """The amount of space allocated to the disk.""" + + unit: Optional[str] = None + """The unit of measure for the disk size.""" + + +class DiskInfo(BaseModel): + size: Optional[Size] = None + + type: Optional[Literal["local", "scratch"]] = None + """The type of disk. + + All Droplets contain a `local` disk. Additionally, GPU Droplets can also have a + `scratch` disk for non-persistent data.
+ """ diff --git a/src/gradientai/types/shared/droplet.py b/src/gradientai/types/shared/droplet.py new file mode 100644 index 00000000..9d2bb17c --- /dev/null +++ b/src/gradientai/types/shared/droplet.py @@ -0,0 +1,143 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .size import Size +from .image import Image +from .kernel import Kernel +from .region import Region +from .gpu_info import GPUInfo +from ..._models import BaseModel +from .disk_info import DiskInfo +from .network_v4 import NetworkV4 +from .network_v6 import NetworkV6 +from .droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["Droplet", "Networks"] + + +class Networks(BaseModel): + v4: Optional[List[NetworkV4]] = None + + v6: Optional[List[NetworkV6]] = None + + +class Droplet(BaseModel): + id: int + """A unique identifier for each Droplet instance. + + This is automatically generated upon Droplet creation. + """ + + backup_ids: List[int] + """ + An array of backup IDs of any backups that have been taken of the Droplet + instance. Droplet backups are enabled at the time of the instance creation. + Requires `image:read` scope. + """ + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the Droplet was created. + """ + + disk: int + """The size of the Droplet's disk in gigabytes.""" + + features: List[str] + """An array of features enabled on this Droplet.""" + + image: Image + """The Droplet's image. Requires `image:read` scope.""" + + locked: bool + """ + A boolean value indicating whether the Droplet has been locked, preventing + actions by users. + """ + + memory: int + """Memory of the Droplet in megabytes.""" + + name: str + """The human-readable name set for the Droplet instance.""" + + networks: Networks + """The details of the network that are configured for the Droplet instance. + + This is an object that contains keys for IPv4 and IPv6. The value of each of + these is an array that contains objects describing an individual IP resource + allocated to the Droplet. These will define attributes like the IP address, + netmask, and gateway of the specific network depending on the type of network it + is. + """ + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + The details of the Droplet's backups feature, if backups are configured for the + Droplet. This object contains keys for the start and end times of the window + during which the backup will start. + """ + + region: Region + + size: Size + + size_slug: str + """The unique slug identifier for the size of this Droplet.""" + + snapshot_ids: List[int] + """ + An array of snapshot IDs of any snapshots created from the Droplet instance. + Requires `image:read` scope. + """ + + status: Literal["new", "active", "off", "archive"] + """A status string indicating the state of the Droplet instance. + + This may be "new", "active", "off", or "archive". + """ + + tags: List[str] + """An array of Tags the Droplet has been tagged with. Requires `tag:read` scope.""" + + vcpus: int + """The number of virtual CPUs.""" + + volume_ids: List[str] + """ + A flat array including the unique identifier for each Block Storage volume + attached to the Droplet. Requires `block_storage:read` scope. + """ + + disk_info: Optional[List[DiskInfo]] = None + """ + An array of objects containing information about the disks available to the + Droplet. 
+ """ + + gpu_info: Optional[GPUInfo] = None + """ + An object containing information about the GPU capabilities of Droplets created + with this size. + """ + + kernel: Optional[Kernel] = None + """ + **Note**: All Droplets created after March 2017 use internal kernels by default. + These Droplets will have this attribute set to `null`. + + The current + [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/) for + Droplets with externally managed kernels. This will initially be set to the + kernel of the base image when the Droplet is created. + """ + + vpc_uuid: Optional[str] = None + """ + A string specifying the UUID of the VPC to which the Droplet is assigned. + Requires `vpc:read` scope. + """ diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/gradientai/types/shared/droplet_next_backup_window.py new file mode 100644 index 00000000..81d07be6 --- /dev/null +++ b/src/gradientai/types/shared/droplet_next_backup_window.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["DropletNextBackupWindow"] + + +class DropletNextBackupWindow(BaseModel): + end: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format specifying the end + of the Droplet's backup window. + """ + + start: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format specifying the start + of the Droplet's backup window. + """ diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/gradientai/types/shared/firewall_rule_target.py new file mode 100644 index 00000000..11f61065 --- /dev/null +++ b/src/gradientai/types/shared/firewall_rule_target.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["FirewallRuleTarget"] + + +class FirewallRuleTarget(BaseModel): + addresses: Optional[List[str]] = None + """ + An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic. + """ + + droplet_ids: Optional[List[int]] = None + """ + An array containing the IDs of the Droplets to which the firewall will allow + traffic. + """ + + kubernetes_ids: Optional[List[str]] = None + """ + An array containing the IDs of the Kubernetes clusters to which the firewall + will allow traffic. + """ + + load_balancer_uids: Optional[List[str]] = None + """ + An array containing the IDs of the load balancers to which the firewall will + allow traffic. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/shared/forward_links.py b/src/gradientai/types/shared/forward_links.py new file mode 100644 index 00000000..30d46985 --- /dev/null +++ b/src/gradientai/types/shared/forward_links.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ForwardLinks"] + + +class ForwardLinks(BaseModel): + last: Optional[str] = None + """URI of the last page of the results.""" + + next: Optional[str] = None + """URI of the next page of the results.""" diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/gradientai/types/shared/garbage_collection.py new file mode 100644 index 00000000..f1f7f4cd --- /dev/null +++ b/src/gradientai/types/shared/garbage_collection.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["GarbageCollection"] + + +class GarbageCollection(BaseModel): + blobs_deleted: Optional[int] = None + """The number of blobs deleted as a result of this garbage collection.""" + + created_at: Optional[datetime] = None + """The time the garbage collection was created.""" + + freed_bytes: Optional[int] = None + """The number of bytes freed as a result of this garbage collection.""" + + registry_name: Optional[str] = None + """The name of the container registry.""" + + status: Optional[ + Literal[ + "requested", + "waiting for write JWTs to expire", + "scanning manifests", + "deleting unreferenced blobs", + "cancelling", + "failed", + "succeeded", + "cancelled", + ] + ] = None + """The current status of this garbage collection.""" + + updated_at: Optional[datetime] = None + """The time the garbage collection was last updated.""" + + uuid: Optional[str] = None + """A string specifying the UUID of the garbage collection.""" diff --git a/src/gradientai/types/shared/gpu_info.py b/src/gradientai/types/shared/gpu_info.py new file mode 100644 index 00000000..a285dd23 --- /dev/null +++ b/src/gradientai/types/shared/gpu_info.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["GPUInfo", "Vram"] + + +class Vram(BaseModel): + amount: Optional[int] = None + """The amount of VRAM allocated to the GPU.""" + + unit: Optional[str] = None + """The unit of measure for the VRAM.""" + + +class GPUInfo(BaseModel): + count: Optional[int] = None + """The number of GPUs allocated to the Droplet.""" + + model: Optional[str] = None + """The model of the GPU.""" + + vram: Optional[Vram] = None diff --git a/src/gradientai/types/shared/image.py b/src/gradientai/types/shared/image.py new file mode 100644 index 00000000..d8a7acde --- /dev/null +++ b/src/gradientai/types/shared/image.py @@ -0,0 +1,131 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Image"] + + +class Image(BaseModel): + id: Optional[int] = None + """A unique number that can be used to identify and reference a specific image.""" + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the image was created. 
+ """ + + description: Optional[str] = None + """An optional free-form text field to describe an image.""" + + distribution: Optional[ + Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + ] = None + """The name of a custom image's distribution. + + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. + """ + + error_message: Optional[str] = None + """ + A string containing information about errors that may occur when importing a + custom image. + """ + + min_disk_size: Optional[int] = None + """The minimum disk size in GB required for a Droplet to use this image.""" + + name: Optional[str] = None + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. + """ + + public: Optional[bool] = None + """ + This is a boolean value that indicates whether the image in question is public + or not. An image that is public is available to all accounts. A non-public image + is only accessible from your account. + """ + + regions: Optional[ + List[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + ] = None + """This attribute is an array of the regions that the image is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: Optional[float] = None + """The size of the image in gigabytes.""" + + slug: Optional[str] = None + """ + A uniquely identifying string that is associated with each of the + DigitalOcean-provided public images. These can be used to reference a public + image as an alternative to the numeric id. + """ + + status: Optional[Literal["NEW", "available", "pending", "deleted", "retired"]] = None + """A status string indicating the state of a custom image. + + This may be `NEW`, `available`, `pending`, `deleted`, or `retired`. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + type: Optional[Literal["base", "snapshot", "backup", "custom", "admin"]] = None + """Describes the kind of image. + + It may be one of `base`, `snapshot`, `backup`, `custom`, or `admin`. + Respectively, this specifies whether an image is a DigitalOcean base OS image, + user-generated Droplet snapshot, automatically created Droplet backup, + user-provided virtual machine image, or an image used for DigitalOcean managed + resources (e.g. DOKS worker nodes). + """ diff --git a/src/gradientai/types/shared/kernel.py b/src/gradientai/types/shared/kernel.py new file mode 100644 index 00000000..78a63427 --- /dev/null +++ b/src/gradientai/types/shared/kernel.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["Kernel"] + + +class Kernel(BaseModel): + id: Optional[int] = None + """A unique number used to identify and reference a specific kernel.""" + + name: Optional[str] = None + """The display name of the kernel. + + This is shown in the web UI and is generally a descriptive title for the kernel + in question. + """ + + version: Optional[str] = None + """ + A standard kernel version string representing the version, patch, and release + information. + """ diff --git a/src/gradientai/types/shared/meta_properties.py b/src/gradientai/types/shared/meta_properties.py new file mode 100644 index 00000000..a78a64d6 --- /dev/null +++ b/src/gradientai/types/shared/meta_properties.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["MetaProperties"] + + +class MetaProperties(BaseModel): + total: Optional[int] = None + """Number of objects returned by the request.""" diff --git a/src/gradientai/types/shared/network_v4.py b/src/gradientai/types/shared/network_v4.py new file mode 100644 index 00000000..bbf8490a --- /dev/null +++ b/src/gradientai/types/shared/network_v4.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["NetworkV4"] + + +class NetworkV4(BaseModel): + gateway: Optional[str] = None + """The gateway of the specified IPv4 network interface. + + For private interfaces, a gateway is not provided. This is denoted by returning + `nil` as its value. + """ + + ip_address: Optional[str] = None + """The IP address of the IPv4 network interface.""" + + netmask: Optional[str] = None + """The netmask of the IPv4 network interface.""" + + type: Optional[Literal["public", "private"]] = None + """The type of the IPv4 network interface.""" diff --git a/src/gradientai/types/shared/network_v6.py b/src/gradientai/types/shared/network_v6.py new file mode 100644 index 00000000..a3eb6b42 --- /dev/null +++ b/src/gradientai/types/shared/network_v6.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["NetworkV6"] + + +class NetworkV6(BaseModel): + gateway: Optional[str] = None + """The gateway of the specified IPv6 network interface.""" + + ip_address: Optional[str] = None + """The IP address of the IPv6 network interface.""" + + netmask: Optional[int] = None + """The netmask of the IPv6 network interface.""" + + type: Optional[Literal["public"]] = None + """The type of the IPv6 network interface. + + **Note**: IPv6 private networking is not currently supported. + """ diff --git a/src/gradientai/types/shared/page_links.py b/src/gradientai/types/shared/page_links.py new file mode 100644 index 00000000..bfceabef --- /dev/null +++ b/src/gradientai/types/shared/page_links.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
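
The NetworkV4 and NetworkV6 models above correspond to the per-interface entries in a Droplet's networks payload. A short sketch of pulling the public IPv4 address out of such a list, using made-up addresses (note that private interfaces carry no gateway, as the docstring describes):

from gradientai.types.shared.network_v4 import NetworkV4

v4 = [
    NetworkV4(ip_address="10.10.0.5", netmask="255.255.0.0", type="private"),
    NetworkV4(
        ip_address="104.236.32.182",
        netmask="255.255.192.0",
        gateway="104.236.0.1",
        type="public",
    ),
]
public_ips = [n.ip_address for n in v4 if n.type == "public"]
print(public_ips)  # ['104.236.32.182']
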
+ +from typing import Union, Optional +from typing_extensions import TypeAlias + +from ..._models import BaseModel +from .forward_links import ForwardLinks +from .backward_links import BackwardLinks + +__all__ = ["PageLinks", "Pages"] + +Pages: TypeAlias = Union[ForwardLinks, BackwardLinks, object] + + +class PageLinks(BaseModel): + pages: Optional[Pages] = None diff --git a/src/gradientai/types/shared/region.py b/src/gradientai/types/shared/region.py new file mode 100644 index 00000000..d2fe7c51 --- /dev/null +++ b/src/gradientai/types/shared/region.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel + +__all__ = ["Region"] + + +class Region(BaseModel): + available: bool + """ + This is a boolean value that represents whether new Droplets can be created in + this region. + """ + + features: List[str] + """ + This attribute is set to an array which contains features available in this + region + """ + + name: str + """The display name of the region. + + This will be a full name that is used in the control panel and other interfaces. + """ + + sizes: List[str] + """ + This attribute is set to an array which contains the identifying slugs for the + sizes available in this region. sizes:read is required to view. + """ + + slug: str + """A human-readable string that is used as a unique identifier for each region.""" diff --git a/src/gradientai/types/shared/size.py b/src/gradientai/types/shared/size.py new file mode 100644 index 00000000..42b0b41f --- /dev/null +++ b/src/gradientai/types/shared/size.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .gpu_info import GPUInfo +from ..._models import BaseModel +from .disk_info import DiskInfo + +__all__ = ["Size"] + + +class Size(BaseModel): + available: bool + """ + This is a boolean value that represents whether new Droplets can be created with + this size. + """ + + description: str + """A string describing the class of Droplets created from this size. + + For example: Basic, General Purpose, CPU-Optimized, Memory-Optimized, or + Storage-Optimized. + """ + + disk: int + """The amount of disk space set aside for Droplets of this size. + + The value is represented in gigabytes. + """ + + memory: int + """The amount of RAM allocated to Droplets created of this size. + + The value is represented in megabytes. + """ + + price_hourly: float + """This describes the price of the Droplet size as measured hourly. + + The value is measured in US dollars. + """ + + price_monthly: float + """ + This attribute describes the monthly cost of this Droplet size if the Droplet is + kept for an entire month. The value is measured in US dollars. + """ + + regions: List[str] + """ + An array containing the region slugs where this size is available for Droplet + creates. regions:read is required to view. + """ + + slug: str + """A human-readable string that is used to uniquely identify each size.""" + + transfer: float + """ + The amount of transfer bandwidth that is available for Droplets created in this + size. This only counts traffic on the public interface. The value is given in + terabytes. + """ + + vcpus: int + """The number of CPUs allocated to Droplets of this size.""" + + disk_info: Optional[List[DiskInfo]] = None + """ + An array of objects containing information about the disks available to Droplets + created with this size. 
+ """ + + gpu_info: Optional[GPUInfo] = None + """ + An object containing information about the GPU capabilities of Droplets created + with this size. + """ diff --git a/src/gradientai/types/shared/snapshots.py b/src/gradientai/types/shared/snapshots.py new file mode 100644 index 00000000..940b58c8 --- /dev/null +++ b/src/gradientai/types/shared/snapshots.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Snapshots"] + + +class Snapshots(BaseModel): + id: str + """The unique identifier for the snapshot.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. + """ + + resource_id: str + """The unique identifier for the resource that the snapshot originated from.""" + + resource_type: Literal["droplet", "volume"] + """The type of resource that the snapshot originated from.""" + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + tags: Optional[List[str]] = None + """An array of Tags the snapshot has been tagged with. + + Requires `tag:read` scope. + """ diff --git a/src/gradientai/types/shared/subscription.py b/src/gradientai/types/shared/subscription.py new file mode 100644 index 00000000..4d77a9b8 --- /dev/null +++ b/src/gradientai/types/shared/subscription.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel +from .subscription_tier_base import SubscriptionTierBase + +__all__ = ["Subscription"] + + +class Subscription(BaseModel): + created_at: Optional[datetime] = None + """The time at which the subscription was created.""" + + tier: Optional[SubscriptionTierBase] = None + + updated_at: Optional[datetime] = None + """The time at which the subscription was last updated.""" diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/gradientai/types/shared/subscription_tier_base.py new file mode 100644 index 00000000..65e1a316 --- /dev/null +++ b/src/gradientai/types/shared/subscription_tier_base.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["SubscriptionTierBase"] + + +class SubscriptionTierBase(BaseModel): + allow_storage_overage: Optional[bool] = None + """ + A boolean indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB used. + """ + + included_bandwidth_bytes: Optional[int] = None + """ + The amount of outbound data transfer included in the subscription tier in bytes. + """ + + included_repositories: Optional[int] = None + """The number of repositories included in the subscription tier. + + `0` indicates that the subscription tier includes unlimited repositories. 
+ """ + + included_storage_bytes: Optional[int] = None + """The amount of storage included in the subscription tier in bytes.""" + + monthly_price_in_cents: Optional[int] = None + """The monthly cost of the subscription tier in cents.""" + + name: Optional[str] = None + """The name of the subscription tier.""" + + slug: Optional[str] = None + """The slug identifier of the subscription tier.""" + + storage_overage_price_in_cents: Optional[int] = None + """ + The price paid in cents per GiB for additional storage beyond what is included + in the subscription plan. + """ diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/gradientai/types/shared/vpc_peering.py new file mode 100644 index 00000000..ef674e23 --- /dev/null +++ b/src/gradientai/types/shared/vpc_peering.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["VpcPeering"] + + +class VpcPeering(BaseModel): + id: Optional[str] = None + """A unique ID that can be used to identify and reference the VPC peering.""" + + created_at: Optional[datetime] = None + """A time value given in ISO8601 combined date and time format.""" + + name: Optional[str] = None + """The name of the VPC peering. + + Must be unique within the team and may only contain alphanumeric characters and + dashes. + """ + + status: Optional[Literal["PROVISIONING", "ACTIVE", "DELETING"]] = None + """The current status of the VPC peering.""" + + vpc_ids: Optional[List[str]] = None + """An array of the two peered VPCs IDs.""" diff --git a/src/gradientai/types/shared_params/__init__.py b/src/gradientai/types/shared_params/__init__.py new file mode 100644 index 00000000..ccdec8fd --- /dev/null +++ b/src/gradientai/types/shared_params/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/gradientai/types/shared_params/firewall_rule_target.py new file mode 100644 index 00000000..49a5f75c --- /dev/null +++ b/src/gradientai/types/shared_params/firewall_rule_target.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable, Optional +from typing_extensions import TypedDict + +__all__ = ["FirewallRuleTarget"] + + +class FirewallRuleTarget(TypedDict, total=False): + addresses: List[str] + """ + An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic. + """ + + droplet_ids: Iterable[int] + """ + An array containing the IDs of the Droplets to which the firewall will allow + traffic. + """ + + kubernetes_ids: List[str] + """ + An array containing the IDs of the Kubernetes clusters to which the firewall + will allow traffic. + """ + + load_balancer_uids: List[str] + """ + An array containing the IDs of the load balancers to which the firewall will + allow traffic. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. 
+ """ diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 4630adfc..dc13cd85 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -54,6 +54,17 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -136,6 +147,17 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -224,6 +246,17 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -306,6 +339,17 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py new file mode 100644 index 00000000..6b8f8bc7 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents.evaluation_metrics import ModelListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.agents.evaluation_metrics.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.agents.evaluation_metrics.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModels: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.agents.evaluation_metrics.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.agents.evaluation_metrics.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True 
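
For reference, the listing call exercised by the tests above reads the same way against a real endpoint. A small sketch (the paging values are placeholders, the usecase constant is the one used in the test, and credentials are assumed to come from the environment):

from gradientai import GradientAI

client = GradientAI()
models = client.agents.evaluation_metrics.models.list(
    page=1,       # placeholder paging values
    per_page=25,
    public_only=True,
    usecases=["MODEL_USECASE_UNKNOWN"],  # constant taken from the test above
)
print(models)
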
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index afeaa8f1..ea39c474 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -34,9 +34,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["string"], - description="description", - name="name", + agent_uuids=["example string"], + description='"example string"', + name='"example name"', ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -108,7 +108,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -116,10 +116,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", - description="description", - name="name", - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + name='"example name"', + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -127,7 +127,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -139,7 +139,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,9 +285,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["string"], - description="description", - name="name", + agent_uuids=["example string"], + description='"example string"', + name='"example name"', ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -359,7 +359,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) 
assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -367,10 +367,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", - description="description", - name="name", - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + name='"example name"', + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -378,7 +378,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -390,7 +390,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 764e13e0..635721b3 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -24,7 +24,7 @@ class TestAgents: @parametrize def test_method_list(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -32,8 +32,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", - field_mask={"paths": ["string"]}, + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, page=0, per_page=0, @@ -44,7 +43,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -56,7 +55,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -78,7 +77,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_move(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -86,9 +85,9 @@ def test_method_move(self, client: GradientAI) -> None: @parametrize def test_method_move_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", - agent_uuids=["string"], - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuids=["example string"], + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -96,7 +95,7 @@ def test_method_move_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_move(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -108,7 +107,7 @@ def test_raw_response_move(self, client: GradientAI) -> None: @parametrize def test_streaming_response_move(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -136,7 +135,7 @@ class TestAsyncAgents: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -144,8 +143,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", - field_mask={"paths": ["string"]}, + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, page=0, per_page=0, @@ -156,7 +154,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -168,7 +166,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -190,7 +188,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -198,9 +196,9 @@ async def test_method_move(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", - agent_uuids=["string"], - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuids=["example string"], + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -208,7 +206,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -220,7 +218,7 @@ async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index beb9666a..c29511f5 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -27,7 +27,7 @@ class TestAPIKeys: @parametrize def test_method_create(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -35,9 +35,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -45,7 +45,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -57,7 +57,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize 
def test_streaming_response_create(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -79,8 +79,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -88,11 +88,11 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -100,8 +100,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -113,8 +113,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -129,21 +129,21 @@ def test_streaming_response_update(self, client: GradientAI) -> None: def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -151,7 +151,7 @@ def test_method_list(self, client: GradientAI) -> None: 
@parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -161,7 +161,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -173,7 +173,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,8 +195,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: api_key = client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -204,8 +204,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -217,8 +217,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -233,22 +233,22 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: api_key = client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -256,8 +256,8 @@ def test_method_regenerate(self, client: GradientAI) -> None: @parametrize def test_raw_response_regenerate(self, 
client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,8 +269,8 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,14 +285,14 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -305,7 +305,7 @@ class TestAsyncAPIKeys: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -313,9 +313,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -323,7 +323,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -335,7 +335,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,8 +357,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - 
path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -366,11 +366,11 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -378,8 +378,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -391,8 +391,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -407,21 +407,21 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -429,7 +429,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -439,7 +439,7 @@ async def 
test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -451,7 +451,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -473,8 +473,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -482,8 +482,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -495,8 +495,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -511,22 +511,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -534,8 +534,8 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> 
None: @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -547,8 +547,8 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -563,12 +563,12 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index e6ca2644..0413591e 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -31,11 +31,11 @@ def test_method_create(self, client: GradientAI) -> None: def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_dataset = client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - name="name", + name='"example name"', ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -75,7 +75,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": "file_name", + "file_name": '"example name"', "file_size": "file_size", } ], @@ -127,11 +127,11 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - name="name", + name='"example name"', ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -173,7 +173,7 @@ 
async def test_method_create_file_upload_presigned_urls_with_all_params(
         evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls(
             files=[
                 {
-                    "file_name": "file_name",
+                    "file_name": '"example name"',
                     "file_size": "file_size",
                 }
             ],
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
index be83e330..d64367ae 100644
--- a/tests/api_resources/agents/test_evaluation_metrics.py
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -9,7 +9,10 @@
 from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import EvaluationMetricListResponse
+from gradientai.types.agents import (
+    EvaluationMetricListResponse,
+    EvaluationMetricListRegionsResponse,
+)
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -45,6 +48,43 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
 
         assert cast(Any, response.is_closed) is True
 
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_regions(self, client: GradientAI) -> None:
+        evaluation_metric = client.agents.evaluation_metrics.list_regions()
+        assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_regions_with_all_params(self, client: GradientAI) -> None:
+        evaluation_metric = client.agents.evaluation_metrics.list_regions(
+            serves_batch=True,
+            serves_inference=True,
+        )
+        assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list_regions(self, client: GradientAI) -> None:
+        response = client.agents.evaluation_metrics.with_raw_response.list_regions()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        evaluation_metric = response.parse()
+        assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list_regions(self, client: GradientAI) -> None:
+        with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            evaluation_metric = response.parse()
+            assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
 
 class TestAsyncEvaluationMetrics:
     parametrize = pytest.mark.parametrize(
@@ -78,3 +118,40 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
             assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
 
         assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_regions(self, async_client: AsyncGradientAI) -> None:
+        evaluation_metric = await async_client.agents.evaluation_metrics.list_regions()
+        assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_regions_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        evaluation_metric = await async_client.agents.evaluation_metrics.list_regions(
+            serves_batch=True,
+            serves_inference=True,
+        )
+        assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric,
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_regions(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_regions(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index b2fce320..2ea44e6b 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -32,9 +32,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.create( - agent_uuids=["string"], - run_name="run_name", - test_case_uuid="test_case_uuid", + agent_uuids=["example string"], + run_name="Evaluation Run Name", + test_case_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @@ -106,7 +106,17 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_list_results(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_results_with_all_params(self, client: GradientAI) -> None: + evaluation_run = client.agents.evaluation_runs.list_results( + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, ) assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) @@ -114,7 +124,7 @@ def test_method_list_results(self, client: GradientAI) -> None: @parametrize def test_raw_response_list_results(self, client: GradientAI) -> None: response = client.agents.evaluation_runs.with_raw_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -126,7 +136,7 @@ def test_raw_response_list_results(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list_results(self, client: GradientAI) -> None: with client.agents.evaluation_runs.with_streaming_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -141,15 +151,15 @@ def test_streaming_response_list_results(self, client: GradientAI) -> None: def test_path_params_list_results(self, client: 
GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             client.agents.evaluation_runs.with_raw_response.list_results(
-                "",
+                evaluation_run_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve_results(self, client: GradientAI) -> None:
         evaluation_run = client.agents.evaluation_runs.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
@@ -157,8 +167,8 @@ def test_method_retrieve_results(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_retrieve_results(self, client: GradientAI) -> None:
         response = client.agents.evaluation_runs.with_raw_response.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -170,8 +180,8 @@ def test_raw_response_retrieve_results(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_retrieve_results(self, client: GradientAI) -> None:
         with client.agents.evaluation_runs.with_streaming_response.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -186,7 +196,7 @@ def test_streaming_response_retrieve_results(self, client: GradientAI) -> None:
     def test_path_params_retrieve_results(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             client.agents.evaluation_runs.with_raw_response.retrieve_results(
-                prompt_id=0,
+                prompt_id=1,
                 evaluation_run_uuid="",
             )
@@ -206,9 +216,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.create(
-            agent_uuids=["string"],
-            run_name="run_name",
-            test_case_uuid="test_case_uuid",
+            agent_uuids=["example string"],
+            run_name="Evaluation Run Name",
+            test_case_uuid='"12345678-1234-1234-1234-123456789012"',
         )
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
@@ -280,7 +290,17 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None
     @parametrize
     async def test_method_list_results(self, async_client: AsyncGradientAI) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.list_results(
-            "evaluation_run_uuid",
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+        )
+        assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_results_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        evaluation_run = await async_client.agents.evaluation_runs.list_results(
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            page=0,
+            per_page=0,
         )
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
@@ -288,7 +308,7 @@ async def test_method_list_results(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_raw_response_list_results(self,
async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.list_results(
-            "evaluation_run_uuid",
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -300,7 +320,7 @@ async def test_raw_response_list_results(self, async_client: AsyncGradientAI) ->
     @parametrize
     async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.list_results(
-            "evaluation_run_uuid",
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -315,15 +335,15 @@ async def test_streaming_response_list_results(self, async_client: AsyncGradient
     async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             await async_client.agents.evaluation_runs.with_raw_response.list_results(
-                "",
+                evaluation_run_uuid="",
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
@@ -331,8 +351,8 @@ async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> N
     @parametrize
     async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -344,8 +364,8 @@ async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI
     @parametrize
     async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results(
-            prompt_id=0,
-            evaluation_run_uuid="evaluation_run_uuid",
+            prompt_id=1,
+            evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -360,6 +380,6 @@ async def test_streaming_response_retrieve_results(self, async_client: AsyncGrad
     async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
-                prompt_id=0,
+                prompt_id=1,
                 evaluation_run_uuid="",
             )
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index a0b5ee77..e9083ba3 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -33,16 +33,17 @@ def test_method_create(self, client: GradientAI) -> None:
     @parametrize
     def test_method_create_with_all_params(self, client: GradientAI) -> None:
         evaluation_test_case =
client.agents.evaluation_test_cases.create(
-            dataset_uuid="dataset_uuid",
-            description="description",
-            metrics=["string"],
-            name="name",
+            dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            description='"example string"',
+            metrics=["example string"],
+            name='"example name"',
             star_metric={
-                "metric_uuid": "metric_uuid",
-                "name": "name",
-                "success_threshold_pct": 0,
+                "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"',
+                "name": '"example name"',
+                "success_threshold": 123,
+                "success_threshold_pct": 123,
             },
-            workspace_uuid="workspace_uuid",
+            workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
@@ -72,7 +73,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
     @parametrize
     def test_method_retrieve(self, client: GradientAI) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
-            test_case_uuid="test_case_uuid",
+            test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -80,7 +81,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
     @parametrize
     def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
-            test_case_uuid="test_case_uuid",
+            test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             evaluation_test_case_version=0,
         )
         assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -89,7 +90,7 @@ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: GradientAI) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.retrieve(
-            test_case_uuid="test_case_uuid",
+            test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -101,7 +102,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: GradientAI) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.retrieve(
-            test_case_uuid="test_case_uuid",
+            test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -123,7 +124,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:
     @parametrize
     def test_method_update(self, client: GradientAI) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.update(
-            path_test_case_uuid="test_case_uuid",
+            path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
@@ -131,17 +132,18 @@ def test_method_update(self, client: GradientAI) -> None:
     @parametrize
     def test_method_update_with_all_params(self, client: GradientAI) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.update(
-            path_test_case_uuid="test_case_uuid",
-            dataset_uuid="dataset_uuid",
-            description="description",
-            metrics={"metric_uuids": ["string"]},
-            name="name",
+            path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            description='"example string"',
+            metrics={"metric_uuids": ["example string"]},
+            name='"example name"',
             star_metric={
-                "metric_uuid": "metric_uuid",
-                "name": "name",
-
"success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - body_test_case_uuid="test_case_uuid", + body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -149,7 +151,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -161,7 +163,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -211,7 +213,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -219,7 +221,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -228,7 +230,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) - @parametrize def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -240,7 +242,7 @@ def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,16 +278,17 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create( - dataset_uuid="dataset_uuid", - 
description="description", - metrics=["string"], - name="name", + dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics=["example string"], + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - "success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -315,7 +318,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -323,7 +326,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -332,7 +335,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -344,7 +347,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -366,7 +369,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -374,17 +377,18 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", - dataset_uuid="dataset_uuid", - description="description", - metrics={"metric_uuids": ["string"]}, - name="name", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics={"metric_uuids": ["example string"]}, + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - "success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - body_test_case_uuid="test_case_uuid", + body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -392,7 +396,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -404,7 +408,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -454,7 +458,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -462,7 +466,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) @parametrize async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -471,7 +475,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A @parametrize async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -483,7 +487,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie @parametrize async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + 
evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 5a3693cb..4390d1d2 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -25,7 +25,7 @@ class TestFunctions: @parametrize def test_method_create(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -33,12 +33,12 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', input_schema={}, output_schema={}, ) @@ -48,7 +48,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -60,7 +60,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -82,8 +82,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) @@ -91,14 +91,14 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - body_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', + body_function_uuid='"12345678-1234-1234-1234-123456789012"', 
input_schema={},
             output_schema={},
         )
@@ -108,8 +108,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_update(self, client: GradientAI) -> None:
         response = client.agents.functions.with_raw_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
+            path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -121,8 +121,8 @@ def test_raw_response_update(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_update(self, client: GradientAI) -> None:
         with client.agents.functions.with_streaming_response.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
+            path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -137,22 +137,22 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
     def test_path_params_update(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
             client.agents.functions.with_raw_response.update(
-                path_function_uuid="function_uuid",
+                path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 path_agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"):
             client.agents.functions.with_raw_response.update(
                 path_function_uuid="",
-                path_agent_uuid="agent_uuid",
+                path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_delete(self, client: GradientAI) -> None:
         function = client.agents.functions.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
+            function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(FunctionDeleteResponse, function, path=["response"])
@@ -160,8 +160,8 @@ def test_method_delete(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_delete(self, client: GradientAI) -> None:
         response = client.agents.functions.with_raw_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
+            function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -173,8 +173,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: GradientAI) -> None:
         with client.agents.functions.with_streaming_response.delete(
-            function_uuid="function_uuid",
-            agent_uuid="agent_uuid",
+            function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,14 +189,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
     def test_path_params_delete(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.functions.with_raw_response.delete(
-                function_uuid="function_uuid",
+                function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty
value for `function_uuid` but received ''"):
             client.agents.functions.with_raw_response.delete(
                 function_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
@@ -209,7 +209,7 @@ class TestAsyncFunctions:
     @parametrize
     async def test_method_create(self, async_client: AsyncGradientAI) -> None:
         function = await async_client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(FunctionCreateResponse, function, path=["response"])
@@ -217,12 +217,12 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
         function = await async_client.agents.functions.create(
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+            description='"My Function Description"',
+            faas_name='"my-function"',
+            faas_namespace='"default"',
+            function_name='"My Function"',
             input_schema={},
             output_schema={},
         )
@@ -232,7 +232,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.functions.with_raw_response.create(
-            path_agent_uuid="agent_uuid",
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -244,7 +244,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.functions.with_streaming_response.create(
-            path_agent_uuid="agent_uuid",
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -266,8 +266,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_method_update(self, async_client: AsyncGradientAI) -> None:
         function = await async_client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
+            path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(FunctionUpdateResponse, function, path=["response"])
@@ -275,14 +275,14 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
         function = await async_client.agents.functions.update(
-            path_function_uuid="function_uuid",
-            path_agent_uuid="agent_uuid",
-            body_agent_uuid="agent_uuid",
-            description="description",
-            faas_name="faas_name",
-            faas_namespace="faas_namespace",
-            function_name="function_name",
-            body_function_uuid="function_uuid",
+            path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+            description='"My Function Description"',
+            faas_name='"my-function"',
+            faas_namespace='"default"',
+            function_name='"My Function"',
+
body_function_uuid='"12345678-1234-1234-1234-123456789012"', input_schema={}, output_schema={}, ) @@ -292,8 +292,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -305,8 +305,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -321,22 +321,22 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): await async_client.agents.functions.with_raw_response.update( path_function_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) @@ -344,8 +344,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -357,8 +357,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -373,12 +373,12 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: 
AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.functions.with_raw_response.delete(
-                function_uuid="function_uuid",
+                function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"):
             await async_client.agents.functions.with_raw_response.delete(
                 function_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index e62c05ff..2ac20d89 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -63,8 +63,8 @@ def test_path_params_attach(self, client: GradientAI) -> None:
     @parametrize
     def test_method_attach_single(self, client: GradientAI) -> None:
         knowledge_base = client.agents.knowledge_bases.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
@@ -72,8 +72,8 @@ def test_method_attach_single(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_attach_single(self, client: GradientAI) -> None:
         response = client.agents.knowledge_bases.with_raw_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -85,8 +85,8 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_attach_single(self, client: GradientAI) -> None:
         with client.agents.knowledge_bases.with_streaming_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -101,22 +101,22 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None:
     def test_path_params_attach_single(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="knowledge_base_uuid",
+                knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.attach_single(
                 knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_detach(self, client: GradientAI) -> None:
         knowledge_base = client.agents.knowledge_bases.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
@@ -124,8 +124,8 @@ def
test_method_detach(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_detach(self, client: GradientAI) -> None:
         response = client.agents.knowledge_bases.with_raw_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -137,8 +137,8 @@ def test_raw_response_detach(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_detach(self, client: GradientAI) -> None:
         with client.agents.knowledge_bases.with_streaming_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -153,14 +153,14 @@ def test_streaming_response_detach(self, client: GradientAI) -> None:
     def test_path_params_detach(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="knowledge_base_uuid",
+                knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.detach(
                 knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
@@ -215,8 +215,8 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])
@@ -224,8 +224,8 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None
     @parametrize
     async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -237,8 +237,8 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -
     @parametrize
     async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.attach_single(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -253,22 +253,22 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien
     async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None:
         with
pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.attach_single(
-                knowledge_base_uuid="knowledge_base_uuid",
+                knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.attach_single(
                 knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
 
     @pytest.mark.skip()
     @parametrize
     async def test_method_detach(self, async_client: AsyncGradientAI) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"])
@@ -276,8 +276,8 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -289,8 +289,8 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None:
     @parametrize
     async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.detach(
-            knowledge_base_uuid="knowledge_base_uuid",
-            agent_uuid="agent_uuid",
+            knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -305,12 +305,12 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) ->
     async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.detach(
-                knowledge_base_uuid="knowledge_base_uuid",
+                knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.detach(
                 knowledge_base_uuid="",
-                agent_uuid="agent_uuid",
+                agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index 2e6dfd7b..d04e8c90 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -26,8 +26,8 @@ class TestRoutes:
     @parametrize
     def test_method_update(self, client: GradientAI) -> None:
         route = client.agents.routes.update(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
+            path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(RouteUpdateResponse, route,
path=["response"]) @@ -35,13 +35,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: route = client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', + uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -49,8 +49,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -62,8 +62,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -80,22 +80,22 @@ def test_path_params_update(self, client: GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: route = client.agents.routes.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -103,8 +103,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -116,8 +116,8 @@ def test_raw_response_delete(self, 
client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: GradientAI) -> None:
         with client.agents.routes.with_streaming_response.delete(
-            child_agent_uuid="child_agent_uuid",
-            parent_agent_uuid="parent_agent_uuid",
+            child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -132,22 +132,22 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
     def test_path_params_delete(self, client: GradientAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
             client.agents.routes.with_raw_response.delete(
-                child_agent_uuid="child_agent_uuid",
+                child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
                 parent_agent_uuid="",
             )
 
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
             client.agents.routes.with_raw_response.delete(
                 child_agent_uuid="",
-                parent_agent_uuid="parent_agent_uuid",
+                parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )
 
     @pytest.mark.skip()
     @parametrize
     def test_method_add(self, client: GradientAI) -> None:
         route = client.agents.routes.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
+            path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(RouteAddResponse, route, path=["response"])
@@ -155,12 +155,12 @@ def test_method_add(self, client: GradientAI) -> None:
     @parametrize
     def test_method_add_with_all_params(self, client: GradientAI) -> None:
         route = client.agents.routes.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
-            body_child_agent_uuid="child_agent_uuid",
-            if_case="if_case",
-            body_parent_agent_uuid="parent_agent_uuid",
-            route_name="route_name",
+            path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+            if_case='"use this to get weather information"',
+            body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"',
+            route_name='"weather_route"',
         )
         assert_matches_type(RouteAddResponse, route, path=["response"])
@@ -168,8 +168,8 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None:
     @parametrize
     def test_raw_response_add(self, client: GradientAI) -> None:
         response = client.agents.routes.with_raw_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
+            path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
 
         assert response.is_closed is True
@@ -181,8 +181,8 @@ def test_raw_response_add(self, client: GradientAI) -> None:
     @parametrize
     def test_streaming_response_add(self, client: GradientAI) -> None:
         with client.agents.routes.with_streaming_response.add(
-            path_child_agent_uuid="child_agent_uuid",
-            path_parent_agent_uuid="parent_agent_uuid",
+            path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -199,14 +199,14 @@ def test_path_params_add(self, client:
GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @@ -261,8 +261,8 @@ class TestAsyncRoutes: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -270,13 +270,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', + uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -284,8 +284,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -297,8 +297,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -315,22 +315,22 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', 
path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -338,8 +338,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -351,8 +351,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -367,22 +367,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="", - parent_agent_uuid="parent_agent_uuid", + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -390,12 +390,12 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", + 
path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -403,8 +403,8 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -416,8 +416,8 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -434,14 +434,14 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 79f73672..d6151470 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -24,7 +24,7 @@ class TestVersions: @parametrize def test_method_update(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -32,9 +32,9 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid="uuid", - body_uuid="uuid", - version_hash="version_hash", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', + version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -42,7 +42,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.update( 
- path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -54,7 +54,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -76,7 +76,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -84,7 +84,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -94,7 +94,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -106,7 +106,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,7 +134,7 @@ class TestAsyncVersions: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -142,9 +142,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid="uuid", - body_uuid="uuid", - version_hash="version_hash", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', + version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -152,7 +152,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -164,7 +164,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,7 +186,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -194,7 +194,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -204,7 +204,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -216,7 +216,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 25b8419a..46c8b431 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -54,6 +54,17 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -136,6 +147,17 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -224,6 +246,17 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -306,6 +339,17 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/gpu_droplets/__init__.py b/tests/api_resources/gpu_droplets/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ 
b/tests/api_resources/gpu_droplets/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/account/__init__.py b/tests/api_resources/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/account/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py new file mode 100644 index 00000000..acad3575 --- /dev/null +++ b/tests/api_resources/gpu_droplets/account/test_keys.py @@ -0,0 +1,399 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.account import ( + KeyListResponse, + KeyCreateResponse, + KeyUpdateResponse, + KeyRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.retrieve( + 512189, + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.retrieve( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + 
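# --- Illustrative sketch (an assumption, not generated code): the account-keys CRUD
# that these tests drive through the mock server, as plain client usage. Assumes a
# configured GradientAI client; the key material and ID below are placeholders.
from gradientai import GradientAI

client = GradientAI()

created = client.gpu_droplets.account.keys.create(
    name="My SSH Public Key",
    public_key="ssh-rsa AAAA... example",  # placeholder public key
)
keys = client.gpu_droplets.account.keys.list(page=1, per_page=25)
client.gpu_droplets.account.keys.update(
    ssh_key_identifier=512189,  # hypothetical key ID
    name="Renamed SSH Public Key",
)
client.gpu_droplets.account.keys.delete(512189)  # returns None on success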
assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.retrieve( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + name="My SSH Public Key", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.update( + ssh_key_identifier=512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.update( + ssh_key_identifier=512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.list( + page=1, + per_page=1, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.delete( + 512189, + ) + assert key is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = 
client.gpu_droplets.account.keys.with_raw_response.delete( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert key is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.delete( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert key is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.retrieve( + 512189, + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.retrieve( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.retrieve( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
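# --- Illustrative async counterpart (an assumption, not generated code): the same
# retrieve call via AsyncGradientAI. The tests also parametrize an aiohttp-backed
# client; only the default async client is shown here.
import asyncio

from gradientai import AsyncGradientAI

async def main() -> None:
    async_client = AsyncGradientAI()  # assumes auth from the environment
    key = await async_client.gpu_droplets.account.keys.retrieve(512189)  # hypothetical ID
    print(key)

asyncio.run(main())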
+ + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + name="My SSH Public Key", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.update( + ssh_key_identifier=512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.update( + ssh_key_identifier=512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.list( + page=1, + per_page=1, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.delete( + 512189, + ) + assert key is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.delete( + 512189, + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert key is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.delete( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert key is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/firewalls/__init__.py b/tests/api_resources/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py new file mode 100644 index 00000000..67d132aa --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.firewalls.droplets.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.droplets.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="", + droplet_ids=[49696269], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.firewalls.droplets.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response 
= client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.droplets.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="", + droplet_ids=[49696269], + ) + + +class TestAsyncDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.firewalls.droplets.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="", + droplet_ids=[49696269], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.firewalls.droplets.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + 
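# --- Illustrative sketch (an assumption, not generated code): attaching and detaching
# Droplets from a firewall outside the test harness. Both calls return None on success,
# matching the `assert droplet is None` checks in these tests.
from gradientai import GradientAI

client = GradientAI()

client.gpu_droplets.firewalls.droplets.add(
    firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",  # hypothetical firewall ID
    droplet_ids=[49696269],
)
client.gpu_droplets.firewalls.droplets.remove(
    firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
    droplet_ids=[49696269],
)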
assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="", + droplet_ids=[49696269], + ) diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py new file mode 100644 index 00000000..446a11af --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py @@ -0,0 +1,326 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRules: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_method_add_with_all_params(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.rules.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_method_remove_with_all_params(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.rules.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="", + ) + + +class TestAsyncRules: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": 
["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = await response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = await response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_method_remove_with_all_params(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = await response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: 
AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = await response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="", + ) diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py new file mode 100644 index 00000000..a0227c61 --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestTags: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + tag = client.gpu_droplets.firewalls.tags.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.tags.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="", + tags=["frontend"], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + tag = client.gpu_droplets.firewalls.tags.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = response.parse() + assert tag is None + + 
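# --- Illustrative sketch (an assumption, not generated code): tagging a firewall so it
# applies to Droplets carrying the tag, then removing the tag. As with the droplet and
# rule endpoints above, both calls return None on success.
from gradientai import GradientAI

client = GradientAI()

client.gpu_droplets.firewalls.tags.add(
    firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",  # hypothetical firewall ID
    tags=["frontend"],
)
client.gpu_droplets.firewalls.tags.remove(
    firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
    tags=["frontend"],
)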
@pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.tags.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="", + tags=["frontend"], + ) + + +class TestAsyncTags: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + tag = await async_client.gpu_droplets.firewalls.tags.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = await response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = await response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="", + tags=["frontend"], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + tag = await async_client.gpu_droplets.firewalls.tags.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = await response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + 
tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = await response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="", + tags=["frontend"], + ) diff --git a/tests/api_resources/gpu_droplets/floating_ips/__init__.py b/tests/api_resources/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py new file mode 100644 index 00000000..82a12d2e --- /dev/null +++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py @@ -0,0 +1,396 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.floating_ips import ( + ActionListResponse, + ActionCreateResponse, + ActionRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + type="assign", + ) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> 
None: + action = client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + droplet_id=758604968, + type="assign", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.list( + "192.168.1.1", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = 
client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.list( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "", + ) + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + type="assign", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + 
floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + droplet_id=758604968, + type="assign", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.list( + "192.168.1.1", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "192.168.1.1", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "", + ) diff --git a/tests/api_resources/gpu_droplets/images/__init__.py b/tests/api_resources/gpu_droplets/images/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/images/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py new file mode 100644 index 00000000..4d59c85b --- /dev/null +++ b/tests/api_resources/gpu_droplets/images/test_actions.py @@ -0,0 +1,321 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.shared import Action +from gradientai.types.gpu_droplets.images import ActionListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.create( + image_id=62137902, + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.create( + 
image_id=62137902, + region="nyc3", + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.retrieve( + action_id=36804636, + image_id=62137902, + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.retrieve( + action_id=36804636, + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.retrieve( + action_id=36804636, + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.list( + 0, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.list( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.list( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + 
@parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.create( + image_id=62137902, + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.retrieve( + action_id=36804636, + image_id=62137902, + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.retrieve( + action_id=36804636, + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.images.actions.with_streaming_response.retrieve( + action_id=36804636, + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.list( + 0, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.list( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.list( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/load_balancers/__init__.py b/tests/api_resources/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py new file mode 100644 index 00000000..333567f4 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
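+# The add/remove endpoints exercised below return no response body, so each
+# parsed result is asserted to be `None`; the remaining assertions only verify
+# transport behaviour against the mock server at TEST_API_BASE_URL. A minimal
+# direct-usage sketch, assuming credentials are supplied via the environment
+# (the IDs are the same illustrative values the tests use):
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     client.gpu_droplets.load_balancers.droplets.add(
+#         lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
+#         droplet_ids=[3164444, 3164445],
+#     )  # returns None once the droplets are attached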
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.load_balancers.droplets.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.load_balancers.droplets.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + +class TestAsyncDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], 
indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.load_balancers.droplets.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.load_balancers.droplets.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="", + droplet_ids=[3164444, 3164445], + ) diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py new file mode 
100644 index 00000000..ec6f7838 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py @@ -0,0 +1,318 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestForwardingRules: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = response.parse() + assert forwarding_rule is 
None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + +class TestAsyncForwardingRules: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = await response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = await response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + forwarding_rule = await 
async_client.gpu_droplets.load_balancers.forwarding_rules.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = await response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = await response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py new file mode 100644 index 00000000..5e443dd8 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_actions.py @@ -0,0 +1,1209 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
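+# `actions.initiate` is overloaded on the action `type`, so the tests below
+# repeat the same transport checks once per request-body shape, while
+# `actions.bulk_initiate` applies an action to every droplet matching a
+# `tag_name`. A minimal sketch of kicking off an action and listing a
+# droplet's action history, assuming credentials are supplied via the
+# environment (the droplet ID is the same illustrative value the tests use):
+#
+#     from gradientai import GradientAI
+#
+#     client = GradientAI()
+#     action = client.gpu_droplets.actions.initiate(
+#         droplet_id=3164444,
+#         type="reboot",
+#     )
+#     history = client.gpu_droplets.actions.list(droplet_id=3164444, per_page=1)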
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + ActionListResponse, + ActionInitiateResponse, + ActionRetrieveResponse, + ActionBulkInitiateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.list( + droplet_id=3164444, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.list( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.list( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.list( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_with_all_params_overload_1(self, client: GradientAI) -> None: + 
action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + name="Nifty New Snapshot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_1(self, client: GradientAI) -> None: + with 
client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, + "plan": "daily", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, + "plan": "weekly", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_3(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_3(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + 
assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_4(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_4(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_4(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_4(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_5(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_5(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + disk=True, + size="s-2vcpu-2gb", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_5(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_5(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_6(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_method_initiate_with_all_params_overload_6(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image="ubuntu-20-04-x64", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_6(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_6(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_7(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_7(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="nifty-new-name", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_7(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_7(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_8(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_8(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + kernel=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_8(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + 
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_initiate_overload_8(self, client: GradientAI) -> None:
+        with client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_initiate_overload_9(self, client: GradientAI) -> None:
+        action = client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_initiate_with_all_params_overload_9(self, client: GradientAI) -> None:
+        action = client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+            name="Nifty New Snapshot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_initiate_overload_9(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_initiate_overload_9(self, client: GradientAI) -> None:
+        with client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncActions:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.retrieve(
+            action_id=36804636,
+            droplet_id=3164444,
+        )
+        assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.retrieve(
+            action_id=36804636,
+            droplet_id=3164444,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.retrieve(
+            action_id=36804636,
+            droplet_id=3164444,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionRetrieveResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.list(
+            droplet_id=3164444,
+        )
+        assert_matches_type(ActionListResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.list(
+            droplet_id=3164444,
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(ActionListResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.list(
+            droplet_id=3164444,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionListResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.list(
+            droplet_id=3164444,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionListResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.bulk_initiate(
+            type="reboot",
+        )
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.bulk_initiate(
+            type="reboot",
+            tag_name="tag_name",
+        )
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.bulk_initiate(
+            type="reboot",
+        )
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.bulk_initiate(
+            type="reboot",
+            tag_name="tag_name",
+            name="Nifty New Snapshot",
+        )
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate(
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionBulkInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+            backup_policy={
+                "hour": 20,
+                "plan": "daily",
+                "weekday": "SUN",
+            },
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_3(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+            backup_policy={
+                "hour": 20,
+                "plan": "weekly",
+                "weekday": "SUN",
+            },
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="enable_backups",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_4(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+            image=12389723,
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_5(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+            disk=True,
+            size="s-2vcpu-2gb",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_6(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+            image="ubuntu-20-04-x64",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="nifty-new-name", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + kernel=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + 
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_overload_9(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradientAI) -> None:
+        action = await async_client.gpu_droplets.actions.initiate(
+            droplet_id=3164444,
+            type="reboot",
+            name="Nifty New Snapshot",
+        )
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.actions.with_raw_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        action = await response.parse()
+        assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.actions.with_streaming_response.initiate(
+            droplet_id=3164444,
+            type="reboot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            action = await response.parse()
+            assert_matches_type(ActionInitiateResponse, action, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py
new file mode 100644
index 00000000..42164666
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_autoscale.py
@@ -0,0 +1,953 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types.gpu_droplets import (
+    AutoscaleListResponse,
+    AutoscaleCreateResponse,
+    AutoscaleUpdateResponse,
+    AutoscaleRetrieveResponse,
+    AutoscaleListHistoryResponse,
+    AutoscaleListMembersResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAutoscale:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        )
+        assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+                "cooldown_minutes": 10,
+                "target_cpu_utilization": 0.5,
+                "target_memory_utilization": 0.6,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+                "ipv6": True,
+                "name": "example.com",
+                "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+                "tags": ["env:prod", "web"],
+                "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+                "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+                "with_droplet_agent": True,
+            },
+            name="my-autoscale-pool",
+        )
+        assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_create(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_create(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_retrieve(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.retrieve(
+            "autoscale_pool_id",
+        )
+        assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.retrieve(
+            "autoscale_pool_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.retrieve(
+            "autoscale_pool_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_retrieve(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.retrieve(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.update(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            config={"target_number_instances": 2},
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        )
+        assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.update(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            config={"target_number_instances": 2},
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+                "ipv6": True,
+                "name": "example.com",
+                "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+                "tags": ["env:prod", "web"],
+                "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
+                "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000",
+                "with_droplet_agent": True,
+            },
+            name="my-autoscale-pool",
+        )
+        assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_update(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.update(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            config={"target_number_instances": 2},
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_update(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.update(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            config={"target_number_instances": 2},
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_update(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.update(
+                autoscale_pool_id="",
+                config={"target_number_instances": 2},
+                droplet_template={
+                    "image": "ubuntu-20-04-x64",
+                    "region": "nyc3",
+                    "size": "c-2",
+                    "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+                },
+                name="my-autoscale-pool",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list()
+        assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list(
+            name="name",
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleListResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_delete(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.delete(
+            "autoscale_pool_id",
+        )
+        assert autoscale is None
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_delete(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.delete(
+            "autoscale_pool_id",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert autoscale is None
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_delete(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.delete(
+            "autoscale_pool_id",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert autoscale is None
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_delete(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.delete(
+                "",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_delete_dangerous(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.delete_dangerous(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            x_dangerous=True,
+        )
+        assert autoscale is None
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_delete_dangerous(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            x_dangerous=True,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert autoscale is None
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            x_dangerous=True,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert autoscale is None
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_delete_dangerous(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.delete_dangerous(
+                autoscale_pool_id="",
+                x_dangerous=True,
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_history(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list_history(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        )
+        assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_history_with_all_params(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list_history(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list_history(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.list_history(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list_history(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.list_history(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_list_history(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.list_history(
+                autoscale_pool_id="",
+            )
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_members(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        )
+        assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_members_with_all_params(self, client: GradientAI) -> None:
+        autoscale = client.gpu_droplets.autoscale.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list_members(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.autoscale.with_raw_response.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = response.parse()
+        assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list_members(self, client: GradientAI) -> None:
+        with client.gpu_droplets.autoscale.with_streaming_response.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = response.parse()
+            assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_list_members(self, client: GradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            client.gpu_droplets.autoscale.with_raw_response.list_members(
+                autoscale_pool_id="",
+            )
+
+
+class TestAsyncAutoscale:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+        autoscale = await async_client.gpu_droplets.autoscale.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+            },
+            name="my-autoscale-pool",
+        )
+        assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        autoscale = await async_client.gpu_droplets.autoscale.create(
+            config={
+                "max_instances": 5,
+                "min_instances": 1,
+                "cooldown_minutes": 10,
+                "target_cpu_utilization": 0.5,
+                "target_memory_utilization": 0.6,
+            },
+            droplet_template={
+                "image": "ubuntu-20-04-x64",
+                "region": "nyc3",
+                "size": "c-2",
+                "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"],
+                "ipv6": True,
+                "name": "example.com",
+                "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988",
+                "tags": ["env:prod", "web"],
+                "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n",
"vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.retrieve( + "autoscale_pool_id", + ) + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", 
+ "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + "ipv6": True, + "name": "example.com", + "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", + "tags": ["env:prod", "web"], + "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await 
async_client.gpu_droplets.autoscale.list( + name="name", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.delete( + "autoscale_pool_id", + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.delete( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.delete( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( + 
autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + autoscale_pool_id="", + x_dangerous=True, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_history(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_history_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_history(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_history(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_history(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_members(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_members_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + 
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list_members(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.autoscale.with_raw_response.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        autoscale = await response.parse()
+        assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list_members(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.autoscale.with_streaming_response.list_members(
+            autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            autoscale = await response.parse()
+            assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_list_members(self, async_client: AsyncGradientAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"):
+            await async_client.gpu_droplets.autoscale.with_raw_response.list_members(
+                autoscale_pool_id="",
+            )
diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py
new file mode 100644
index 00000000..f8f72140
--- /dev/null
+++ b/tests/api_resources/gpu_droplets/test_backups.py
@@ -0,0 +1,315 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradientai import GradientAI, AsyncGradientAI
+from tests.utils import assert_matches_type
+from gradientai.types.gpu_droplets import (
+    BackupListResponse,
+    BackupListPoliciesResponse,
+    BackupRetrievePolicyResponse,
+    BackupListSupportedPoliciesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBackups:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.list(
+            droplet_id=3164444,
+        )
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.list(
+            droplet_id=3164444,
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.backups.with_raw_response.list(
+            droplet_id=3164444,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = response.parse()
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list(self, client: GradientAI) -> None:
+        with client.gpu_droplets.backups.with_streaming_response.list(
+            droplet_id=3164444,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = response.parse()
+            assert_matches_type(BackupListResponse, backup, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_policies(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.list_policies()
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_policies_with_all_params(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.list_policies(
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list_policies(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.backups.with_raw_response.list_policies()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = response.parse()
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list_policies(self, client: GradientAI) -> None:
+        with client.gpu_droplets.backups.with_streaming_response.list_policies() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = response.parse()
+            assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_list_supported_policies(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.list_supported_policies()
+        assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_list_supported_policies(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.backups.with_raw_response.list_supported_policies()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = response.parse()
+        assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_list_supported_policies(self, client: GradientAI) -> None:
+        with client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = response.parse()
+            assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_retrieve_policy(self, client: GradientAI) -> None:
+        backup = client.gpu_droplets.backups.retrieve_policy(
+            1,
+        )
+        assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_retrieve_policy(self, client: GradientAI) -> None:
+        response = client.gpu_droplets.backups.with_raw_response.retrieve_policy(
+            1,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = response.parse()
+        assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_retrieve_policy(self, client: GradientAI) -> None:
+        with client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
+            1,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = response.parse()
+            assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncBackups:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        backup = await async_client.gpu_droplets.backups.list(
+            droplet_id=3164444,
+        )
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        backup = await async_client.gpu_droplets.backups.list(
+            droplet_id=3164444,
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.backups.with_raw_response.list(
+            droplet_id=3164444,
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = await response.parse()
+        assert_matches_type(BackupListResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.backups.with_streaming_response.list(
+            droplet_id=3164444,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = await response.parse()
+            assert_matches_type(BackupListResponse, backup, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_policies(self, async_client: AsyncGradientAI) -> None:
+        backup = await async_client.gpu_droplets.backups.list_policies()
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_list_policies_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        backup = await async_client.gpu_droplets.backups.list_policies(
+            page=1,
+            per_page=1,
+        )
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_list_policies(self, async_client: AsyncGradientAI) -> None:
+        response = await async_client.gpu_droplets.backups.with_raw_response.list_policies()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        backup = await response.parse()
+        assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_list_policies(self, async_client: AsyncGradientAI) -> None:
+        async with async_client.gpu_droplets.backups.with_streaming_response.list_policies() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            backup = await response.parse()
+            assert_matches_type(BackupListPoliciesResponse, backup, path=["response"])
+
+        assert cast(Any,
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list_supported_policies() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.list_supported_policies() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.retrieve_policy( + 1, + ) + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py new file mode 100644 index 00000000..b6922feb --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py @@ -0,0 +1,431 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
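+# Covers the destroy-with-associated-resources endpoints: list, check_status,
+# delete_dangerous, delete_selective, and retry.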
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + DestroyWithAssociatedResourceListResponse, + DestroyWithAssociatedResourceCheckStatusResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDestroyWithAssociatedResources: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_check_status(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_raw_response_check_status(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_check_status(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_dangerous(self, client: GradientAI) -> None: + destroy_with_associated_resource = 
client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_selective(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_method_delete_selective_with_all_params(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + floating_ips=["6186916"], + reserved_ips=["6186916"], + snapshots=["61486916"], + volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], + volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_selective(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_selective(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retry(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry( + 1, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_retry(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
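+ # The retry endpoint returns an empty body, so the parsed value is expected to be None.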
destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retry(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncDestroyWithAssociatedResources: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_check_status(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.check_status( + 1, + ) + ) + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_check_status(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_check_status(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( + 1, + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_selective(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + floating_ips=["6186916"], + reserved_ips=["6186916"], + snapshots=["61486916"], + volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], + volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + 
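+ # with_streaming_response defers reading the body; parse() is awaited explicitly here.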
destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retry(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.retry( + 1, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retry(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retry(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py new file mode 100644 index 00000000..537fe7d2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_firewalls.py @@ -0,0 +1,617 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + FirewallListResponse, + FirewallCreateResponse, + FirewallUpdateResponse, + FirewallRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFirewalls: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.create() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.create( + body={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "80", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + 
"load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "80", + "protocol": "tcp", + } + ], + "tags": ["base-image", "prod"], + }, + ) + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "8080", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": 
"22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "frontend-firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "8080", + "protocol": "tcp", + } + ], + "tags": ["frontend"], + }, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="", + firewall={"name": "frontend-firewall"}, + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.list() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.list( + page=1, + per_page=1, + ) + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + firewall = 
client.gpu_droplets.firewalls.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert firewall is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert firewall is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert firewall is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.delete( + "", + ) + + +class TestAsyncFirewalls: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.create() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.create( + body={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "80", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "80", + "protocol": "tcp", + } + ], + "tags": ["base-image", "prod"], + }, + ) + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.firewalls.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "8080", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "frontend-firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + 
"ports": "8080", + "protocol": "tcp", + } + ], + "tags": ["frontend"], + }, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="", + firewall={"name": "frontend-firewall"}, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.list() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.list( + page=1, + per_page=1, + ) + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert firewall is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.delete( + 
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert firewall is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert firewall is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py new file mode 100644 index 00000000..830e9b39 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_floating_ips.py @@ -0,0 +1,424 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + FloatingIPListResponse, + FloatingIPCreateResponse, + FloatingIPRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFloatingIPs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.create( + droplet_id=2457247, + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.create( + droplet_id=2457247, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.create( + droplet_id=2457247, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.create( + region="nyc3", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + floating_ip = 
client.gpu_droplets.floating_ips.create( + region="nyc3", + project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.create( + region="nyc3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.create( + region="nyc3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.retrieve( + "192.168.1.1", + ) + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.retrieve( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.list() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.list( + page=1, + per_page=1, + ) + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.delete( + "192.168.1.1", + ) + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.delete( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.delete( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert floating_ip is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.with_raw_response.delete( + "", + ) + + +class TestAsyncFloatingIPs: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + droplet_id=2457247, + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( + droplet_id=2457247, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( + droplet_id=2457247, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + region="nyc3", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize 
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + region="nyc3", + project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( + region="nyc3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( + region="nyc3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.retrieve( + "192.168.1.1", + ) + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.list() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.list( + page=1, + per_page=1, + ) + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.delete( + "192.168.1.1", + ) + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert floating_ip is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py new file mode 100644 index 00000000..7be6a786 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_images.py @@ -0,0 +1,417 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + ImageListResponse, + ImageCreateResponse, + ImageUpdateResponse, + ImageRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestImages: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.create() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.create( + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + region="nyc3", + tags=["base-image", "prod"], + url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", + ) + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.retrieve( + 0, + ) + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.retrieve( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.retrieve( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.update( + image_id=62137902, + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.update( + image_id=62137902, + 
description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.update( + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.update( + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.list() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.list( + page=1, + per_page=1, + private=True, + tag_name="tag_name", + type="application", + ) + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.delete( + 0, + ) + assert image is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.delete( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert image is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.delete( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert image is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncImages: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: 
AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.create() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.create( + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + region="nyc3", + tags=["base-image", "prod"], + url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", + ) + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.retrieve( + 0, + ) + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.retrieve( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.retrieve( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.update( + image_id=62137902, + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.update( + image_id=62137902, + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.update( + 
image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.update( + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.list() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.list( + page=1, + per_page=1, + private=True, + tag_name="tag_name", + type="application", + ) + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.delete( + 0, + ) + assert image is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.delete( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert image is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.delete( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert image is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py new file mode 100644 index 00000000..c1ce1ce2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_load_balancers.py @@ -0,0 +1,1443 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + LoadBalancerListResponse, + LoadBalancerCreateResponse, + LoadBalancerUpdateResponse, + LoadBalancerRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestLoadBalancers: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with 
client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + 
"target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.retrieve( + "lb_id", + ) + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.retrieve( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + 
name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", 
"cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.list() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.list( + page=1, + per_page=1, + ) + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.list() + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.delete( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.delete( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.delete( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete_cache(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.delete_cache( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_cache(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_cache(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete_cache(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "", + ) + + +class TestAsyncLoadBalancers: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def 
test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.retrieve( + "lb_id", + ) + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + 
sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + 
"cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.list() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.list( + page=1, + per_page=1, + ) + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.gpu_droplets.load_balancers.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.delete( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_cache(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete_cache(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected 
a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py new file mode 100644 index 00000000..eda73b1e --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import SizeListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSizes: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + size = client.gpu_droplets.sizes.list() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + size = client.gpu_droplets.sizes.list( + page=1, + per_page=1, + ) + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.sizes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + size = response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.sizes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + size = response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncSizes: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + size = await async_client.gpu_droplets.sizes.list() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + size = await async_client.gpu_droplets.sizes.list( + page=1, + per_page=1, + ) + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.sizes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + size = await response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.sizes.with_streaming_response.list() as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + size = await response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py new file mode 100644 index 00000000..5d7132c2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -0,0 +1,236 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSnapshots: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.retrieve( + 6372321, + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.retrieve( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.retrieve( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.list() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.list( + page=1, + per_page=1, + resource_type="droplet", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() 
+ @parametrize + def test_method_delete(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.delete( + 6372321, + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.delete( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.delete( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncSnapshots: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.retrieve( + 6372321, + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.retrieve( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.snapshots.with_streaming_response.retrieve( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.list() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.list( + page=1, + per_page=1, + resource_type="droplet", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.snapshots.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.delete( + 6372321, + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.delete( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.snapshots.with_streaming_response.delete( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py new file mode 100644 index 00000000..64bcb4c5 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_volumes.py @@ -0,0 +1,568 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + VolumeListResponse, + VolumeCreateResponse, + VolumeRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVolumes: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.list() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.list( + name="name", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_name(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete_by_name() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_name_with_all_params(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete_by_name( + name="name", + region="nyc3", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_by_name(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.delete_by_name() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = 
response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_by_name(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncVolumes: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.list() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.list( + name="name", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_name(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete_by_name() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete_by_name( + name="name", + region="nyc3", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.delete_by_name() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/volumes/__init__.py b/tests/api_resources/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
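The volume-actions file that follows exercises `initiate_by_id` in three overloads (attach/detach by droplet ID, and a resize variant driven by `size_gigabytes`), plus name-based `initiate_by_name` variants. As a rough sketch of what one of these calls looks like outside the Prism mock-server test harness — the `api_key`/`base_url` constructor keywords are an assumption based on typical Stainless-generated clients, not confirmed by this patch:

    from gradientai import GradientAI

    # Assumed constructor kwargs (hypothetical): api_key and base_url.
    # The base URL below matches the TEST_API_BASE_URL default used by these tests.
    client = GradientAI(api_key="...", base_url="http://127.0.0.1:4010")

    # Attach a block-storage volume to a droplet by volume ID, mirroring the
    # initiate_by_id(..., type="attach") calls exercised in the tests below.
    action = client.gpu_droplets.volumes.actions.initiate_by_id(
        volume_id="7724db7c-e098-11e5-b522-000f53304e51",
        droplet_id=11612190,
        type="attach",
        region="nyc3",
    )
    print(action)

The `with_raw_response` and `with_streaming_response` variants tested below wrap this same call to expose the underlying HTTP response (headers, `is_closed`) before `parse()` yields the typed model.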
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py new file mode 100644 index 00000000..d5338c97 --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py @@ -0,0 +1,825 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.volumes import ( + ActionListResponse, + ActionRetrieveResponse, + ActionInitiateByIDResponse, + ActionInitiateByNameResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_2(self, client: 
GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: + 
with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_3(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + size_gigabytes=16384, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_with_all_params_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.list( + 
volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + size_gigabytes=16384, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_overload_2(self, async_client: 
AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="detach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="detach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="detach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="detach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py new file mode 100644 index 00000000..8b72305c --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py @@ -0,0 +1,412 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
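+# Illustrative sketch (not part of the generated suite; assumes a configured
+# `client: GradientAI` like the fixture these tests receive): every test below
+# exercises one of the three access patterns the generated client exposes for
+# each resource method: the plain call, the `.with_raw_response` wrapper, and
+# the `.with_streaming_response` context manager. For the snapshots resource
+# that looks like:
+#
+#     snapshot = client.gpu_droplets.volumes.snapshots.create(
+#         volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+#         name="big-data-snapshot1475261774",
+#     )  # eager call, returns the parsed response model directly
+#
+#     response = client.gpu_droplets.volumes.snapshots.with_raw_response.create(
+#         volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+#         name="big-data-snapshot1475261774",
+#     )
+#     snapshot = response.parse()  # httpx metadata stays available on `response`
+#
+#     with client.gpu_droplets.volumes.snapshots.with_streaming_response.create(
+#         volume_id="7724db7c-e098-11e5-b522-000f53304e51",
+#         name="big-data-snapshot1475261774",
+#     ) as response:
+#         snapshot = response.parse()  # body is read while the context is open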
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.volumes import ( + SnapshotListResponse, + SnapshotCreateResponse, + SnapshotRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSnapshots: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + tags=["base-image", "prod"], + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="", + name="big-data-snapshot1475261774", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.retrieve( + "snapshot_id", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( + "snapshot_id", + ) as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.delete( + "snapshot_id", + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( + "snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `snapshot_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "", + ) + + +class TestAsyncSnapshots: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + tags=["base-image", "prod"], + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="", + name="big-data-snapshot1475261774", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve( + "snapshot_id", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( + 
"snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.delete( + "snapshot_id", + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( + "snapshot_id", + ) as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index 90bf95b9..157a2e3d 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.create( - name="name", + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -71,9 +71,9 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -81,7 +81,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.inference.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -93,7 +93,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,7 +248,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.create( - name="name", + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, 
path=["response"]) @@ -286,9 +286,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -296,7 +296,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.inference.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -308,7 +308,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 9c466e2f..55b056b8 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -25,7 +25,7 @@ class TestDataSources: @parametrize def test_method_create(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -33,22 +33,22 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - body_knowledge_base_uuid="knowledge_base_uuid", + body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, web_crawler_data_source={ - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.create( - 
path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -71,7 +71,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -95,7 +95,7 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -103,7 +103,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -113,7 +113,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -125,7 +125,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -147,8 +147,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -156,8 +156,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -169,8 +169,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + 
data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -185,14 +185,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -205,7 +205,7 @@ class TestAsyncDataSources: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -213,22 +213,22 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - body_knowledge_base_uuid="knowledge_base_uuid", + body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, web_crawler_data_source={ - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -239,7 +239,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -251,7 +251,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" @@ -275,7 +275,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -283,7 +283,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -293,7 +293,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -305,7 +305,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -327,8 +327,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -336,8 +336,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -349,8 +349,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -365,12 +365,12 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> 
async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 8bf1829f..ed32d7f8 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -33,8 +33,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["string"], - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuids=["example string"], + knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -185,7 +185,7 @@ def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -193,8 +193,8 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -202,7 +202,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -214,7 +214,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,8 +248,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["string"], - knowledge_base_uuid="knowledge_base_uuid", + 
data_source_uuids=["example string"], + knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -400,7 +400,7 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -408,8 +408,8 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -417,7 +417,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -429,7 +429,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index 79bfcdc3..c61a97ea 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.create( - api_key="api_key", - name="name", + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_list_agents(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -247,7 +247,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @parametrize def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list_agents(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,7 +269,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list_agents(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.create( - api_key="api_key", - name="name", + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def 
test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -395,7 +395,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -538,7 +538,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 2640601e..7fde1a69 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: openai = 
client.models.providers.openai.create( - api_key="api_key", - name="name", + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-proj--123456789098765432123456789"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -247,7 +247,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non @parametrize def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,7 +269,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def
test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.create( - api_key="api_key", - name="name", + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-proj--123456789098765432123456789"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -395,7 +395,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.openai.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is
True @@ -538,7 +538,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2cc0e080..8a6a7d69 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -34,16 +34,16 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: agent = client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + knowledge_base_uuid=["example string"], + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Agent"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -115,7 +115,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: agent = client.agents.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -123,22 +123,23 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + conversation_logs_enabled=True, + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + k=5, + max_tokens=100, + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My New Agent Name"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", + tags=["example string"], + temperature=0.7, + top_p=0.9, + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -146,7 +147,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update( - path_uuid="uuid", + 
path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -158,7 +159,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -260,7 +261,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_update_status(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -268,8 +269,8 @@ def test_method_update_status(self, client: GradientAI) -> None: @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -278,7 +279,7 @@ def test_method_update_status_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -290,7 +291,7 @@ def test_raw_response_update_status(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -324,16 +325,16 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + knowledge_base_uuid=["example string"], + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Agent"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -405,7 +406,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -413,22 +414,23 @@ async def 
test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + conversation_logs_enabled=True, + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + k=5, + max_tokens=100, + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My New Agent Name"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", + tags=["example string"], + temperature=0.7, + top_p=0.9, + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -436,7 +438,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -448,7 +450,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -550,7 +552,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -558,8 +560,8 @@ async def test_method_update_status(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -568,7 +570,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -580,7 +582,7 @@ async def test_raw_response_update_status(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_update_status(self, 
async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py new file mode 100644 index 00000000..22f3d2d0 --- /dev/null +++ b/tests/api_resources/test_gpu_droplets.py @@ -0,0 +1,912 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import ( + GPUDropletListResponse, + GPUDropletCreateResponse, + GPUDropletRetrieveResponse, + GPUDropletListKernelsResponse, + GPUDropletListFirewallsResponse, + GPUDropletListNeighborsResponse, + GPUDropletListSnapshotsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestGPUDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + 
image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.retrieve( + 1, + ) + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.retrieve( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.retrieve( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = 
client.gpu_droplets.list( + name="name", + page=1, + per_page=1, + tag_name="tag_name", + type="droplets", + ) + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.delete( + 1, + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.delete( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.delete( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_tag(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.delete_by_tag( + tag_name="tag_name", + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_by_tag(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.delete_by_tag( + tag_name="tag_name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_by_tag(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.delete_by_tag( + tag_name="tag_name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_firewalls(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_firewalls( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_firewalls_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_firewalls( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListFirewallsResponse, 
gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_firewalls(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_firewalls( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_firewalls(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_firewalls( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_kernels(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_kernels( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_kernels_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_kernels( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_kernels(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_kernels( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_kernels(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_kernels( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_neighbors(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_neighbors( + 1, + ) + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_neighbors(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_neighbors( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_neighbors(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_neighbors( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + 
assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_snapshots(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_snapshots( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_snapshots_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_snapshots( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_snapshots(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_snapshots( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_snapshots(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_snapshots( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncGPUDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + 
async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.retrieve( + 1, + ) + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.retrieve( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + 
assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.retrieve( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list( + name="name", + page=1, + per_page=1, + tag_name="tag_name", + type="droplets", + ) + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.delete( + 1, + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.delete( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.delete( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.delete_by_tag( + tag_name="tag_name", + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.gpu_droplets.with_raw_response.delete_by_tag( + tag_name="tag_name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.delete_by_tag( + tag_name="tag_name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_firewalls(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_firewalls( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_firewalls( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_firewalls( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_firewalls( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_kernels(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_kernels( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_kernels( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_kernels(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_kernels( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_list_kernels(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_kernels( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_neighbors(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_neighbors( + 1, + ) + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_neighbors( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_neighbors( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_snapshots(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_snapshots( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_snapshots( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_snapshots( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_snapshots( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 508820ce..8a331b52 100644 --- 
a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -33,42 +33,42 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.create( - database_id="database_id", + database_id='"12345678-1234-1234-1234-123456789012"', datasources=[ { "aws_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", + "bucket_name": '"example name"', + "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - "item_path": "item_path", + "item_path": '"example string"', "spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, "web_crawler_data_source": { - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], + vpc_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -140,7 +140,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -148,13 +148,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + database_id='"12345678-1234-1234-1234-123456789012"', + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + tags=["example string"], + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -162,7 +162,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", + 
path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -174,7 +174,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -287,42 +287,42 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.create( - database_id="database_id", + database_id='"12345678-1234-1234-1234-123456789012"', datasources=[ { "aws_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", + "bucket_name": '"example name"', + "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - "item_path": "item_path", + "item_path": '"example string"', "spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, "web_crawler_data_source": { - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], + vpc_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -394,7 +394,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -402,13 +402,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + database_id='"12345678-1234-1234-1234-123456789012"', + 
embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + tags=["example string"], + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -416,7 +416,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -428,7 +428,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 5e119f71..fe837973 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse +from gradientai.types import ModelListResponse, ModelRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,19 +19,50 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + def test_method_retrieve(self, client: GradientAI) -> None: + model = client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - model = client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.models.list() assert_matches_type(ModelListResponse, 
model, path=["response"]) @pytest.mark.skip() @@ -64,19 +95,50 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 8e25617f..4f232293 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -27,8 +27,8 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: region = client.regions.list( - serves_batch=True, - serves_inference=True, + page=1, + per_page=1, ) assert_matches_type(RegionListResponse, region, path=["response"]) @@ -70,8 +70,8 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: region = await async_client.regions.list( - serves_batch=True, - serves_inference=True, + page=1, + per_page=1, ) assert_matches_type(RegionListResponse, region, path=["response"]) From f3115ddb8f65f82a5227239ddc9df89b6b720b9a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:29:04 +0000 Subject: [PATCH 118/200] feat(api): add gpu droplets --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 48 +- api.md | 418 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- 
src/{gradientai => do_gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{gradientai => do_gradientai}/_client.py | 0 src/{gradientai => do_gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{gradientai => do_gradientai}/_files.py | 0 src/{gradientai => do_gradientai}/_models.py | 0 src/{gradientai => do_gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{gradientai => do_gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{gradientai => do_gradientai}/_version.py | 2 +- src/{gradientai => do_gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/chat/__init__.py | 0 .../resources/agents/chat/chat.py | 0 .../resources/agents/chat/completions.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../agents/evaluation_metrics/models.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 .../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/gpu_droplets/__init__.py | 0 .../gpu_droplets/account/__init__.py | 0 .../resources/gpu_droplets/account/account.py | 0 .../resources/gpu_droplets/account/keys.py | 0 .../resources/gpu_droplets/actions.py | 0 .../resources/gpu_droplets/autoscale.py | 0 .../resources/gpu_droplets/backups.py | 0 .../destroy_with_associated_resources.py | 0 .../gpu_droplets/firewalls/__init__.py | 0 .../gpu_droplets/firewalls/droplets.py | 0 .../gpu_droplets/firewalls/firewalls.py | 0 .../resources/gpu_droplets/firewalls/rules.py | 0 .../resources/gpu_droplets/firewalls/tags.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../gpu_droplets/floating_ips/actions.py | 0 .../gpu_droplets/floating_ips/floating_ips.py | 0 .../resources/gpu_droplets/gpu_droplets.py | 0 .../resources/gpu_droplets/images/__init__.py | 0 .../resources/gpu_droplets/images/actions.py | 0 .../resources/gpu_droplets/images/images.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../gpu_droplets/load_balancers/droplets.py | 0 .../load_balancers/forwarding_rules.py | 0 .../load_balancers/load_balancers.py | 0 .../resources/gpu_droplets/sizes.py | 0 .../resources/gpu_droplets/snapshots.py | 0 .../gpu_droplets/volumes/__init__.py | 0 .../resources/gpu_droplets/volumes/actions.py | 0 .../gpu_droplets/volumes/snapshots.py | 0 .../resources/gpu_droplets/volumes/volumes.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/models/__init__.py | 0 .../resources/models/models.py | 0 
.../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 0 .../resources/models/providers/openai.py | 0 .../resources/models/providers/providers.py | 0 .../resources/regions.py | 0 .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 ...evaluation_metric_list_regions_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 
.../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 .../types/agents/route_view_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/gpu_droplet_create_params.py | 0 .../types/gpu_droplet_create_response.py | 0 .../types/gpu_droplet_delete_by_tag_params.py | 0 .../gpu_droplet_list_firewalls_params.py | 0 .../gpu_droplet_list_firewalls_response.py | 0 .../types/gpu_droplet_list_kernels_params.py | 0 .../gpu_droplet_list_kernels_response.py | 0 .../gpu_droplet_list_neighbors_response.py | 0 .../types/gpu_droplet_list_params.py | 0 .../types/gpu_droplet_list_response.py | 0 .../gpu_droplet_list_snapshots_params.py | 0 .../gpu_droplet_list_snapshots_response.py | 0 .../types/gpu_droplet_retrieve_response.py | 0 .../types/gpu_droplets/__init__.py | 0 .../types/gpu_droplets/account/__init__.py | 0 .../gpu_droplets/account/key_create_params.py | 0 .../account/key_create_response.py | 0 .../gpu_droplets/account/key_list_params.py | 0 .../gpu_droplets/account/key_list_response.py | 0 .../account/key_retrieve_response.py | 0 .../gpu_droplets/account/key_update_params.py | 0 .../account/key_update_response.py | 0 .../action_bulk_initiate_params.py | 0 .../action_bulk_initiate_response.py | 0 .../gpu_droplets/action_initiate_params.py | 0 .../gpu_droplets/action_initiate_response.py | 0 .../types/gpu_droplets/action_list_params.py | 0 .../gpu_droplets/action_list_response.py | 0 .../gpu_droplets/action_retrieve_response.py | 0 .../types/gpu_droplets/associated_resource.py | 0 .../gpu_droplets/autoscale_create_params.py | 0 .../gpu_droplets/autoscale_create_response.py | 0 .../autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../gpu_droplets/autoscale_list_params.py | 0 .../gpu_droplets/autoscale_list_response.py | 0 .../types/gpu_droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../autoscale_retrieve_response.py | 0 .../gpu_droplets/autoscale_update_params.py | 0 .../gpu_droplets/autoscale_update_response.py | 0 .../types/gpu_droplets/backup_list_params.py | 0 .../backup_list_policies_params.py | 0 .../backup_list_policies_response.py | 0 .../gpu_droplets/backup_list_response.py | 0 ...backup_list_supported_policies_response.py | 0 .../backup_retrieve_policy_response.py | 0 
.../types/gpu_droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../destroyed_associated_resource.py | 0 .../types/gpu_droplets/domains.py | 0 .../types/gpu_droplets/domains_param.py | 0 .../types/gpu_droplets/firewall.py | 0 .../gpu_droplets/firewall_create_params.py | 0 .../gpu_droplets/firewall_create_response.py | 0 .../gpu_droplets/firewall_list_params.py | 0 .../gpu_droplets/firewall_list_response.py | 0 .../types/gpu_droplets/firewall_param.py | 0 .../firewall_retrieve_response.py | 0 .../gpu_droplets/firewall_update_params.py | 0 .../gpu_droplets/firewall_update_response.py | 0 .../types/gpu_droplets/firewalls/__init__.py | 0 .../firewalls/droplet_add_params.py | 0 .../firewalls/droplet_remove_params.py | 0 .../gpu_droplets/firewalls/rule_add_params.py | 0 .../firewalls/rule_remove_params.py | 0 .../gpu_droplets/firewalls/tag_add_params.py | 0 .../firewalls/tag_remove_params.py | 0 .../types/gpu_droplets/floating_ip.py | 0 .../gpu_droplets/floating_ip_create_params.py | 0 .../floating_ip_create_response.py | 0 .../gpu_droplets/floating_ip_list_params.py | 0 .../gpu_droplets/floating_ip_list_response.py | 0 .../floating_ip_retrieve_response.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/gpu_droplets/forwarding_rule.py | 0 .../gpu_droplets/forwarding_rule_param.py | 0 .../types/gpu_droplets/glb_settings.py | 0 .../types/gpu_droplets/glb_settings_param.py | 0 .../types/gpu_droplets/health_check.py | 0 .../types/gpu_droplets/health_check_param.py | 0 .../types/gpu_droplets/image_create_params.py | 0 .../gpu_droplets/image_create_response.py | 0 .../types/gpu_droplets/image_list_params.py | 0 .../types/gpu_droplets/image_list_response.py | 0 .../gpu_droplets/image_retrieve_response.py | 0 .../types/gpu_droplets/image_update_params.py | 0 .../gpu_droplets/image_update_response.py | 0 .../types/gpu_droplets/images/__init__.py | 0 .../images/action_create_params.py | 0 .../images/action_list_response.py | 0 .../types/gpu_droplets/lb_firewall.py | 0 .../types/gpu_droplets/lb_firewall_param.py | 0 .../types/gpu_droplets/load_balancer.py | 0 .../load_balancer_create_params.py | 0 .../load_balancer_create_response.py | 0 .../gpu_droplets/load_balancer_list_params.py | 0 .../load_balancer_list_response.py | 0 .../load_balancer_retrieve_response.py | 0 .../load_balancer_update_params.py | 0 .../load_balancer_update_response.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/gpu_droplets/size_list_params.py | 0 .../types/gpu_droplets/size_list_response.py | 0 .../gpu_droplets/snapshot_list_params.py | 0 .../gpu_droplets/snapshot_list_response.py | 0 .../snapshot_retrieve_response.py | 0 .../types/gpu_droplets/sticky_sessions.py | 0 .../gpu_droplets/sticky_sessions_param.py | 0 .../gpu_droplets/volume_create_params.py | 0 .../gpu_droplets/volume_create_response.py | 0 .../volume_delete_by_name_params.py | 0 .../types/gpu_droplets/volume_list_params.py | 0 .../gpu_droplets/volume_list_response.py | 0 .../gpu_droplets/volume_retrieve_response.py | 0 
.../types/gpu_droplets/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../volumes/action_list_params.py | 0 .../volumes/action_list_response.py | 0 .../volumes/action_retrieve_params.py | 0 .../volumes/action_retrieve_response.py | 0 .../volumes/snapshot_create_params.py | 0 .../volumes/snapshot_create_response.py | 0 .../volumes/snapshot_list_params.py | 0 .../volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../gpu_droplets/volumes/volume_action.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model_list_response.py | 0 .../types/model_retrieve_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 .../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 .../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 .../providers/openai_retrieve_response.py | 0 .../models/providers/openai_update_params.py | 0 
.../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../agents/chat/test_completions.py | 4 +- .../agents/evaluation_metrics/test_models.py | 4 +- .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- .../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../gpu_droplets/account/test_keys.py | 4 +- .../gpu_droplets/firewalls/test_droplets.py | 2 +- .../gpu_droplets/firewalls/test_rules.py | 2 +- .../gpu_droplets/firewalls/test_tags.py | 2 +- .../gpu_droplets/floating_ips/test_actions.py | 4 +- .../gpu_droplets/images/test_actions.py | 6 +- .../load_balancers/test_droplets.py | 2 +- .../load_balancers/test_forwarding_rules.py | 2 +- .../gpu_droplets/test_actions.py | 4 +- .../gpu_droplets/test_autoscale.py | 4 +- .../gpu_droplets/test_backups.py | 4 +- .../test_destroy_with_associated_resources.py | 4 +- .../gpu_droplets/test_firewalls.py | 4 +- .../gpu_droplets/test_floating_ips.py | 4 +- .../api_resources/gpu_droplets/test_images.py | 4 +- .../gpu_droplets/test_load_balancers.py | 4 +- .../api_resources/gpu_droplets/test_sizes.py | 4 +- .../gpu_droplets/test_snapshots.py | 4 +- .../gpu_droplets/test_volumes.py | 4 +- .../gpu_droplets/volumes/test_actions.py | 4 +- .../gpu_droplets/volumes/test_snapshots.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../knowledge_bases/test_indexing_jobs.py | 4 +- .../models/providers/test_anthropic.py | 4 +- .../models/providers/test_openai.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_gpu_droplets.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 48 +- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- 
tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 506 files changed, 398 insertions(+), 398 deletions(-) rename src/{gradientai => do_gradientai}/__init__.py (95%) rename src/{gradientai => do_gradientai}/_base_client.py (99%) rename src/{gradientai => do_gradientai}/_client.py (100%) rename src/{gradientai => do_gradientai}/_compat.py (100%) rename src/{gradientai => do_gradientai}/_constants.py (100%) rename src/{gradientai => do_gradientai}/_exceptions.py (100%) rename src/{gradientai => do_gradientai}/_files.py (100%) rename src/{gradientai => do_gradientai}/_models.py (100%) rename src/{gradientai => do_gradientai}/_qs.py (100%) rename src/{gradientai => do_gradientai}/_resource.py (100%) rename src/{gradientai => do_gradientai}/_response.py (99%) rename src/{gradientai => do_gradientai}/_streaming.py (100%) rename src/{gradientai => do_gradientai}/_types.py (99%) rename src/{gradientai => do_gradientai}/_utils/__init__.py (100%) rename src/{gradientai => do_gradientai}/_utils/_logs.py (75%) rename src/{gradientai => do_gradientai}/_utils/_proxy.py (100%) rename src/{gradientai => do_gradientai}/_utils/_reflection.py (100%) rename src/{gradientai => do_gradientai}/_utils/_resources_proxy.py (50%) rename src/{gradientai => do_gradientai}/_utils/_streams.py (100%) rename src/{gradientai => do_gradientai}/_utils/_sync.py (100%) rename src/{gradientai => do_gradientai}/_utils/_transform.py (100%) rename src/{gradientai => do_gradientai}/_utils/_typing.py (100%) rename src/{gradientai => do_gradientai}/_utils/_utils.py (100%) rename src/{gradientai => do_gradientai}/_version.py (83%) rename src/{gradientai => do_gradientai}/py.typed (100%) rename src/{gradientai => do_gradientai}/resources/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_datasets.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/models.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_runs.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_test_cases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/functions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/routes.py (100%) rename src/{gradientai => 
do_gradientai}/resources/agents/versions.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/account.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/keys.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/autoscale.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/backups.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/destroy_with_associated_resources.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/firewalls.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/rules.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/tags.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/floating_ips.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/gpu_droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/images.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/forwarding_rules.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/load_balancers.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/sizes.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/volumes.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/inference.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/indexing_jobs.py (100%) rename src/{gradientai => 
do_gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/models/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/models.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/anthropic.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/openai.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/providers.py (100%) rename src/{gradientai => do_gradientai}/resources/regions.py (100%) rename src/{gradientai => do_gradientai}/types/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric_result.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_prompt.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_run.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_test_case.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_regenerate_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_link_knowledge_base_output.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_star_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_star_metric_param.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/agents/evaluation_dataset_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%) rename src/{gradientai 
=> do_gradientai}/types/agents/evaluation_test_case_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/knowledge_base_detach_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_add_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_view_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/api_agent.py (100%) rename src/{gradientai => do_gradientai}/types/api_agent_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_agent_model.py (100%) rename src/{gradientai => do_gradientai}/types/api_agreement.py (100%) rename src/{gradientai => do_gradientai}/types/api_anthropic_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_deployment_visibility.py (100%) rename src/{gradientai => do_gradientai}/types/api_knowledge_base.py (100%) rename src/{gradientai => do_gradientai}/types/api_model.py (100%) rename src/{gradientai => do_gradientai}/types/api_model_version.py (100%) rename src/{gradientai => do_gradientai}/types/api_openai_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_retrieval_method.py (100%) rename src/{gradientai => do_gradientai}/types/api_workspace.py (100%) rename src/{gradientai => do_gradientai}/types/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/chat/completion_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/chat/completion_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/droplet_backup_policy.py (100%) rename src/{gradientai => do_gradientai}/types/droplet_backup_policy_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_delete_by_tag_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplet_list_neighbors_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/associated_resource.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_static_config.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplets/autoscale_pool_static_config_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_policies_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_policies_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_supported_policies_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_retrieve_policy_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/current_utilization.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroyed_associated_resource.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_params.py (100%) rename 
src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_delete_by_name_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/volume_action.py (100%) rename src/{gradientai => do_gradientai}/types/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_update_regenerate_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/inference/api_key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_model_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexing_job.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/aws_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%) rename src/{gradientai => do_gradientai}/types/model_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/model_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/__init__.py (100%) rename src/{gradientai => 
do_gradientai}/types/models/providers/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/region_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/region_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/shared/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/shared/action.py (100%) rename src/{gradientai => do_gradientai}/types/shared/action_link.py (100%) rename src/{gradientai => do_gradientai}/types/shared/api_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/api_meta.py (100%) rename src/{gradientai => do_gradientai}/types/shared/backward_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/chat_completion_chunk.py (100%) rename src/{gradientai => do_gradientai}/types/shared/chat_completion_token_logprob.py (100%) rename src/{gradientai => do_gradientai}/types/shared/completion_usage.py (100%) rename src/{gradientai => do_gradientai}/types/shared/disk_info.py (100%) rename src/{gradientai => do_gradientai}/types/shared/droplet.py (100%) rename src/{gradientai => do_gradientai}/types/shared/droplet_next_backup_window.py (100%) rename src/{gradientai => do_gradientai}/types/shared/firewall_rule_target.py (100%) rename src/{gradientai => do_gradientai}/types/shared/forward_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/garbage_collection.py (100%) rename src/{gradientai => do_gradientai}/types/shared/gpu_info.py (100%) rename src/{gradientai => do_gradientai}/types/shared/image.py (100%) rename src/{gradientai => 
do_gradientai}/types/shared/kernel.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/meta_properties.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/network_v4.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/network_v6.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/page_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/region.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/size.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/snapshots.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/subscription.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/subscription_tier_base.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/vpc_peering.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared_params/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared_params/firewall_rule_target.py (100%)

diff --git a/.stats.yml b/.stats.yml
index 5f9d16dd..c2a4edd2 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 168
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml
 openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d
-config_hash: 683ea6ba4d63037c1c72484e5936e73c
+config_hash: 47020371e6a9b81bd2a937e1c587f774
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 086907ef..4f59c83a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock

 Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
 result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.

 ## Adding and running examples
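For downstream users, this entire patch reduces to a one-line import change. A minimal before/after sketch, using the client construction already shown in the README hunks below (`GRADIENTAI_API_KEY` is the env var the README documents):

```python
import os

# Before this patch the package was imported as `gradientai`:
# from gradientai import GradientAI

# After this patch the import path is `do_gradientai`:
from do_gradientai import GradientAI

client = GradientAI(
    api_key=os.environ.get("GRADIENTAI_API_KEY"),  # this is the default and can be omitted
)
```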
diff --git a/README.md b/README.md
index 67a567cc..0cfdfb76 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ The full API of this library can be found in [api.md](api.md).

 ```python
 import os
-from gradientai import GradientAI
+from do_gradientai import GradientAI

 client = GradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -56,7 +56,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
 ```python
 import os
 import asyncio
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI

 client = AsyncGradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -96,8 +96,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH

 ```python
 import asyncio
-from gradientai import DefaultAioHttpClient
-from gradientai import AsyncGradientAI
+from do_gradientai import DefaultAioHttpClient
+from do_gradientai import AsyncGradientAI


 async def main() -> None:
@@ -125,7 +125,7 @@ asyncio.run(main())
 We provide support for streaming responses using Server Side Events (SSE).

 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI

 client = GradientAI()

@@ -146,7 +146,7 @@ for completion in stream:
 The async client uses the exact same interface.

 ```python
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI

 client = AsyncGradientAI()

@@ -178,7 +178,7 @@ Typed requests and responses provide autocomplete and documentation within your
 Nested parameters are dictionaries, typed using `TypedDict`, for example:

 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI

 client = GradientAI()

@@ -197,16 +197,16 @@ print(completion.stream_options)

 ## Handling errors

-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.

 When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.

-All errors inherit from `gradientai.APIError`.
+All errors inherit from `do_gradientai.APIError`.

 ```python
-import gradientai
-from gradientai import GradientAI
+import do_gradientai
+from do_gradientai import GradientAI

 client = GradientAI()

@@ -220,12 +220,12 @@ try:
         ],
         model="llama3.3-70b-instruct",
     )
-except gradientai.APIConnectionError as e:
+except do_gradientai.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except gradientai.RateLimitError as e:
+except do_gradientai.RateLimitError as e:
     print("A 429 status code was received; we should back off a bit.")
-except gradientai.APIStatusError as e:
+except do_gradientai.APIStatusError as e:
     print("Another non-200-range status code was received")
     print(e.status_code)
     print(e.response)
@@ -253,7 +253,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
 You can use the `max_retries` option to configure or disable retry settings:

 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI

 # Configure the default for all requests:
 client = GradientAI(
@@ -279,7 +279,7 @@ By default requests time out after 1 minute. You can configure this with a `time
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:

 ```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI

 # Configure the default for all requests:
 client = GradientAI(
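Both knobs above also compose per request. A short sketch, assuming the `with_options` helper this README uses later for `http_client` also accepts `max_retries` and `timeout` (standard in these generated clients, but not shown verbatim in the hunks above); message content and the override values are illustrative:

```python
from do_gradientai import GradientAI

client = GradientAI(max_retries=0)  # disable retries for every request from this client

# Assumed per-request override via `with_options`; returns a client copy,
# so the base client's defaults are left untouched.
completion = client.with_options(max_retries=5, timeout=20.0).chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="llama3.3-70b-instruct",
)
print(completion.choices)
```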
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -427,7 +427,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c ```python import httpx -from gradientai import GradientAI, DefaultHttpxClient +from do_gradientai import GradientAI, DefaultHttpxClient client = GradientAI( # Or use the `GRADIENT_AI_BASE_URL` env var @@ -450,7 +450,7 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. ```py -from gradientai import GradientAI +from do_gradientai import GradientAI with GradientAI() as client: # make requests here @@ -478,8 +478,8 @@ If you've upgraded to the latest version but aren't seeing any new features you You can determine the version that is being used at runtime with: ```py -import gradientai -print(gradientai.__version__) +import do_gradientai +print(do_gradientai.__version__) ``` ## Requirements diff --git a/api.md b/api.md index 8682940b..fa4e0edb 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,7 @@ # Shared Types ```python -from gradientai.types import ( +from do_gradientai.types import ( Action, ActionLink, APILinks, @@ -37,7 +37,7 @@ from gradientai.types import ( Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, APIAgentModel, @@ -57,19 +57,19 @@ from gradientai.types import ( Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -80,11 +80,11 @@ from gradientai.types.agents import ( Methods: -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- 
client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Chat @@ -93,19 +93,19 @@ Methods: Types: ```python -from gradientai.types.agents.chat import CompletionCreateResponse +from do_gradientai.types.agents.chat import CompletionCreateResponse ``` Methods: -- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse ## EvaluationMetrics Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) @@ -113,15 +113,15 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse -- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse +- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### Workspaces Types: ```python -from gradientai.types.agents.evaluation_metrics import ( +from do_gradientai.types.agents.evaluation_metrics import ( WorkspaceCreateResponse, WorkspaceRetrieveResponse, WorkspaceUpdateResponse, @@ -133,19 +133,19 @@ from gradientai.types.agents.evaluation_metrics import ( Methods: -- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse -- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse -- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse -- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse -- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse -- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse +- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse +- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse +- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse +- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse +- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse +- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse #### Agents Types: ```python -from gradientai.types.agents.evaluation_metrics.workspaces import ( +from do_gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) @@ -153,27 +153,27 @@ from gradientai.types.agents.evaluation_metrics.workspaces import ( Methods: -- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse -- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse +- 
client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse +- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse ### Models Types: ```python -from gradientai.types.agents.evaluation_metrics import ModelListResponse +from do_gradientai.types.agents.evaluation_metrics import ModelListResponse ``` Methods: -- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse +- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse ## EvaluationRuns Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIEvaluationMetric, APIEvaluationMetricResult, APIEvaluationPrompt, @@ -187,17 +187,17 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse -- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse -- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse +- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse +- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIEvaluationTestCase, APIStarMetric, EvaluationTestCaseCreateResponse, @@ -210,18 +210,18 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse -- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse -- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse -- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse +- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse ## EvaluationDatasets Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) @@ -229,15 +229,15 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse -- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +- 
client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse ## Functions Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -246,43 +246,43 @@ from gradientai.types.agents import ( Methods: -- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions Types: ```python -from gradientai.types.agents import VersionUpdateResponse, VersionListResponse +from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## Routes Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( RouteUpdateResponse, RouteDeleteResponse, RouteAddResponse, @@ -292,10 +292,10 @@ from gradientai.types.agents import ( Methods: -- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse -- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse -- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse -- client.agents.routes.view(uuid) -> RouteViewResponse +- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse +- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse +- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse +- client.agents.routes.view(uuid) -> RouteViewResponse # Chat @@ -304,31 +304,31 @@ Methods: Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from 
do_gradientai.types.chat import CompletionCreateResponse ``` Methods: -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse # Regions Types: ```python -from gradientai.types import RegionListResponse +from do_gradientai.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -340,18 +340,18 @@ from gradientai.types import ( Methods: -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources Types: ```python -from gradientai.types.knowledge_bases import ( +from do_gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -365,16 +365,16 @@ from gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse ## IndexingJobs Types: ```python -from gradientai.types.knowledge_bases import ( +from do_gradientai.types.knowledge_bases import ( APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, @@ -387,11 +387,11 @@ from gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> 
IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Inference @@ -400,7 +400,7 @@ Methods: Types: ```python -from gradientai.types.inference import ( +from do_gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -412,18 +412,18 @@ from gradientai.types.inference import ( Methods: -- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse -- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Models Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIAgreement, APIModel, APIModelVersion, @@ -434,8 +434,8 @@ from gradientai.types import ( Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -444,7 +444,7 @@ Methods: Types: ```python -from gradientai.types.models.providers import ( +from do_gradientai.types.models.providers import ( AnthropicCreateResponse, AnthropicRetrieveResponse, AnthropicUpdateResponse, @@ -456,19 +456,19 @@ from gradientai.types.models.providers import ( Methods: -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from gradientai.types.models.providers import ( +from do_gradientai.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -480,19 +480,19 @@ from gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- 
client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # GPUDroplets Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( DropletBackupPolicy, GPUDropletCreateResponse, GPUDropletRetrieveResponse, @@ -506,22 +506,22 @@ from gradientai.types import ( Methods: -- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse -- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse -- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse -- client.gpu_droplets.delete(droplet_id) -> None -- client.gpu_droplets.delete_by_tag(\*\*params) -> None -- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse -- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse -- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse -- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse +- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse +- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse +- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse +- client.gpu_droplets.delete(droplet_id) -> None +- client.gpu_droplets.delete_by_tag(\*\*params) -> None +- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse +- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse +- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse +- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse ## Backups Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupListSupportedPoliciesResponse, @@ -531,17 +531,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse -- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse -- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse -- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse +- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse +- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse +- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse +- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse ## Actions Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( ActionRetrieveResponse, ActionListResponse, 
ActionBulkInitiateResponse, @@ -551,17 +551,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse -- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse -- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse -- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse +- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse +- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse +- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse ## DestroyWithAssociatedResources Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( AssociatedResource, DestroyedAssociatedResource, DestroyWithAssociatedResourceListResponse, @@ -571,18 +571,18 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse -- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse -- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None -- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None -- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse +- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse +- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None +- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None ## Autoscale Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( AutoscalePool, AutoscalePoolDropletTemplate, AutoscalePoolDynamicConfig, @@ -599,21 +599,21 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse -- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse -- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse -- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse -- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None -- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None -- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse -- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse +- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse +- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse +- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse +- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse +- 
client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse +- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse ## Firewalls Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( Firewall, FirewallCreateResponse, FirewallRetrieveResponse, @@ -624,39 +624,39 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse -- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse -- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse -- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse -- client.gpu_droplets.firewalls.delete(firewall_id) -> None +- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse +- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse +- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse +- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse +- client.gpu_droplets.firewalls.delete(firewall_id) -> None ### Droplets Methods: -- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None ### Tags Methods: -- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None ### Rules Methods: -- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None ## FloatingIPs Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( FloatingIP, FloatingIPCreateResponse, FloatingIPRetrieveResponse, @@ -666,17 +666,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse -- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse -- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse -- client.gpu_droplets.floating_ips.delete(floating_ip) -> None +- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse +- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse +- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse +- client.gpu_droplets.floating_ips.delete(floating_ip) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.floating_ips import ( +from do_gradientai.types.gpu_droplets.floating_ips import ( ActionCreateResponse, ActionRetrieveResponse, ActionListResponse, @@ -685,16 +685,16 @@ from 
gradientai.types.gpu_droplets.floating_ips import ( Methods: -- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse -- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse -- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse +- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse +- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse +- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse ## Images Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( ImageCreateResponse, ImageRetrieveResponse, ImageUpdateResponse, @@ -704,32 +704,32 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse -- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse -- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse -- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse -- client.gpu_droplets.images.delete(image_id) -> None +- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse +- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse +- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse +- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse +- client.gpu_droplets.images.delete(image_id) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.images import ActionListResponse +from do_gradientai.types.gpu_droplets.images import ActionListResponse ``` Methods: -- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action -- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action -- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse +- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action +- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action +- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse ## LoadBalancers Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( Domains, ForwardingRule, GlbSettings, @@ -746,59 +746,59 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse -- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse -- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse -- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse -- client.gpu_droplets.load_balancers.delete(lb_id) -> None -- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None +- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse +- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse +- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse +- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse +- client.gpu_droplets.load_balancers.delete(lb_id) -> None +- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None ### Droplets Methods: -- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None -- 
client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None ### ForwardingRules Methods: -- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None -- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None ## Sizes Types: ```python -from gradientai.types.gpu_droplets import SizeListResponse +from do_gradientai.types.gpu_droplets import SizeListResponse ``` Methods: -- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse +- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse ## Snapshots Types: ```python -from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse +from do_gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse ``` Methods: -- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse -- client.gpu_droplets.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse +- client.gpu_droplets.snapshots.delete(snapshot_id) -> None ## Volumes Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse, @@ -807,18 +807,18 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse -- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse -- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse -- client.gpu_droplets.volumes.delete(volume_id) -> None -- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None +- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse +- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse +- client.gpu_droplets.volumes.delete(volume_id) -> None +- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai.types.gpu_droplets.volumes import ( VolumeAction, ActionRetrieveResponse, ActionListResponse, @@ -829,17 +829,17 @@ from gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse -- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse -- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse -- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse +- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- 
client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse ### Snapshots Types: ```python -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai.types.gpu_droplets.volumes import ( SnapshotCreateResponse, SnapshotRetrieveResponse, SnapshotListResponse, @@ -848,10 +848,10 @@ from gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse -- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse -- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None ## Account @@ -860,7 +860,7 @@ Methods: Types: ```python -from gradientai.types.gpu_droplets.account import ( +from do_gradientai.types.gpu_droplets.account import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -870,8 +870,8 @@ from gradientai.types.gpu_droplets.account import ( Methods: -- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse -- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse -- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse -- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse -- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None +- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse +- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse +- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None diff --git a/mypy.ini b/mypy.ini index 748d8234..82b0c891 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index 0b307b64..7a93ec8c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import gradientai'" +"check:importable" = "python -c 'import do_gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/gradientai"] +packages = ["src/do_gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["gradientai", "tests"] +known-first-party = ["do_gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index 2ff9a58c..a320c1a8 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/gradientai/_version.py" + "src/do_gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index 37b38f6f..e46e909b 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import gradientai' +rye run python -c 'import do_gradientai' diff --git a/src/gradientai/__init__.py b/src/do_gradientai/__init__.py similarity index 95% rename from src/gradientai/__init__.py rename to src/do_gradientai/__init__.py index 3316fe47..41b943b2 100644 --- a/src/gradientai/__init__.py +++ b/src/do_gradientai/__init__.py @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError +# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "gradientai" + __locals[__name].__module__ = "do_gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/gradientai/_base_client.py b/src/do_gradientai/_base_client.py similarity index 99% rename from src/gradientai/_base_client.py rename to src/do_gradientai/_base_client.py index 379c27d1..326c662c 100644 --- a/src/gradientai/_base_client.py +++ b/src/do_gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/gradientai/_client.py b/src/do_gradientai/_client.py similarity index 100% rename from src/gradientai/_client.py rename to src/do_gradientai/_client.py diff --git a/src/gradientai/_compat.py b/src/do_gradientai/_compat.py similarity index 100% rename from src/gradientai/_compat.py rename to src/do_gradientai/_compat.py diff --git a/src/gradientai/_constants.py b/src/do_gradientai/_constants.py similarity index 100% rename from src/gradientai/_constants.py rename to src/do_gradientai/_constants.py diff --git a/src/gradientai/_exceptions.py b/src/do_gradientai/_exceptions.py similarity index 100% rename from src/gradientai/_exceptions.py rename to src/do_gradientai/_exceptions.py diff --git a/src/gradientai/_files.py b/src/do_gradientai/_files.py similarity index 100% rename from src/gradientai/_files.py rename to src/do_gradientai/_files.py diff --git a/src/gradientai/_models.py b/src/do_gradientai/_models.py similarity index 100% rename from src/gradientai/_models.py rename to src/do_gradientai/_models.py diff --git a/src/gradientai/_qs.py b/src/do_gradientai/_qs.py similarity index 100% rename from src/gradientai/_qs.py rename to src/do_gradientai/_qs.py diff --git a/src/gradientai/_resource.py b/src/do_gradientai/_resource.py similarity index 100% rename from src/gradientai/_resource.py rename to src/do_gradientai/_resource.py diff --git a/src/gradientai/_response.py b/src/do_gradientai/_response.py similarity index 99% rename from src/gradientai/_response.py rename to src/do_gradientai/_response.py index 2037e4ca..8ca43971 100644 --- a/src/gradientai/_response.py +++ b/src/do_gradientai/_response.py @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -558,7 +558,7 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `do_gradientai._streaming` for reference", ) diff --git a/src/gradientai/_streaming.py b/src/do_gradientai/_streaming.py similarity index 100% rename from src/gradientai/_streaming.py rename to src/do_gradientai/_streaming.py diff --git a/src/gradientai/_types.py b/src/do_gradientai/_types.py similarity index 99% rename from src/gradientai/_types.py rename to src/do_gradientai/_types.py index 1bac876d..c356c700 100644 --- a/src/gradientai/_types.py +++ b/src/do_gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from gradientai import NoneType +# from do_gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/gradientai/_utils/__init__.py b/src/do_gradientai/_utils/__init__.py similarity index 100% rename from src/gradientai/_utils/__init__.py rename to src/do_gradientai/_utils/__init__.py diff --git a/src/gradientai/_utils/_logs.py b/src/do_gradientai/_utils/_logs.py similarity index 75% rename from src/gradientai/_utils/_logs.py rename to src/do_gradientai/_utils/_logs.py index 9047e5c8..ac45b1a5 100644 --- a/src/gradientai/_utils/_logs.py +++ b/src/do_gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("gradientai") +logger: logging.Logger = logging.getLogger("do_gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", diff --git a/src/gradientai/_utils/_proxy.py b/src/do_gradientai/_utils/_proxy.py similarity index 100% rename from src/gradientai/_utils/_proxy.py rename to src/do_gradientai/_utils/_proxy.py diff --git a/src/gradientai/_utils/_reflection.py b/src/do_gradientai/_utils/_reflection.py similarity index 100% rename from src/gradientai/_utils/_reflection.py rename to src/do_gradientai/_utils/_reflection.py diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/do_gradientai/_utils/_resources_proxy.py similarity index 50% rename from src/gradientai/_utils/_resources_proxy.py rename to src/do_gradientai/_utils/_resources_proxy.py index b3bc4931..03763c3b 100644 --- a/src/gradientai/_utils/_resources_proxy.py +++ b/src/do_gradientai/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `gradientai.resources` module. + """A proxy for the `do_gradientai.resources` module. 
- This is used so that we can lazily import `gradientai.resources` only when - needed *and* so that users can just import `gradientai` and reference `gradientai.resources` + This is used so that we can lazily import `do_gradientai.resources` only when + needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("gradientai.resources") + mod = importlib.import_module("do_gradientai.resources") return mod diff --git a/src/gradientai/_utils/_streams.py b/src/do_gradientai/_utils/_streams.py similarity index 100% rename from src/gradientai/_utils/_streams.py rename to src/do_gradientai/_utils/_streams.py diff --git a/src/gradientai/_utils/_sync.py b/src/do_gradientai/_utils/_sync.py similarity index 100% rename from src/gradientai/_utils/_sync.py rename to src/do_gradientai/_utils/_sync.py diff --git a/src/gradientai/_utils/_transform.py b/src/do_gradientai/_utils/_transform.py similarity index 100% rename from src/gradientai/_utils/_transform.py rename to src/do_gradientai/_utils/_transform.py diff --git a/src/gradientai/_utils/_typing.py b/src/do_gradientai/_utils/_typing.py similarity index 100% rename from src/gradientai/_utils/_typing.py rename to src/do_gradientai/_utils/_typing.py diff --git a/src/gradientai/_utils/_utils.py b/src/do_gradientai/_utils/_utils.py similarity index 100% rename from src/gradientai/_utils/_utils.py rename to src/do_gradientai/_utils/_utils.py diff --git a/src/gradientai/_version.py b/src/do_gradientai/_version.py similarity index 83% rename from src/gradientai/_version.py rename to src/do_gradientai/_version.py index c1d566e6..d69cef74 100644 --- a/src/gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-__title__ = "gradientai" +__title__ = "do_gradientai" __version__ = "0.1.0-alpha.14" # x-release-please-version diff --git a/src/gradientai/py.typed b/src/do_gradientai/py.typed similarity index 100% rename from src/gradientai/py.typed rename to src/do_gradientai/py.typed diff --git a/src/gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py similarity index 100% rename from src/gradientai/resources/__init__.py rename to src/do_gradientai/resources/__init__.py diff --git a/src/gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py similarity index 100% rename from src/gradientai/resources/agents/__init__.py rename to src/do_gradientai/resources/agents/__init__.py diff --git a/src/gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py similarity index 100% rename from src/gradientai/resources/agents/agents.py rename to src/do_gradientai/resources/agents/agents.py diff --git a/src/gradientai/resources/agents/api_keys.py b/src/do_gradientai/resources/agents/api_keys.py similarity index 100% rename from src/gradientai/resources/agents/api_keys.py rename to src/do_gradientai/resources/agents/api_keys.py diff --git a/src/gradientai/resources/agents/chat/__init__.py b/src/do_gradientai/resources/agents/chat/__init__.py similarity index 100% rename from src/gradientai/resources/agents/chat/__init__.py rename to src/do_gradientai/resources/agents/chat/__init__.py diff --git a/src/gradientai/resources/agents/chat/chat.py b/src/do_gradientai/resources/agents/chat/chat.py similarity index 100% rename from src/gradientai/resources/agents/chat/chat.py rename to src/do_gradientai/resources/agents/chat/chat.py diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/do_gradientai/resources/agents/chat/completions.py similarity index 100% rename from src/gradientai/resources/agents/chat/completions.py rename to src/do_gradientai/resources/agents/chat/completions.py diff --git a/src/gradientai/resources/agents/evaluation_datasets.py b/src/do_gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_datasets.py rename to src/do_gradientai/resources/agents/evaluation_datasets.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/do_gradientai/resources/agents/evaluation_metrics/models.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/models.py rename to src/do_gradientai/resources/agents/evaluation_metrics/models.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to 
src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff --git a/src/gradientai/resources/agents/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_runs.py rename to src/do_gradientai/resources/agents/evaluation_runs.py diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_test_cases.py rename to src/do_gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/gradientai/resources/agents/functions.py b/src/do_gradientai/resources/agents/functions.py similarity index 100% rename from src/gradientai/resources/agents/functions.py rename to src/do_gradientai/resources/agents/functions.py diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/do_gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/agents/knowledge_bases.py rename to src/do_gradientai/resources/agents/knowledge_bases.py diff --git a/src/gradientai/resources/agents/routes.py b/src/do_gradientai/resources/agents/routes.py similarity index 100% rename from src/gradientai/resources/agents/routes.py rename to src/do_gradientai/resources/agents/routes.py diff --git a/src/gradientai/resources/agents/versions.py b/src/do_gradientai/resources/agents/versions.py similarity index 100% rename from src/gradientai/resources/agents/versions.py rename to src/do_gradientai/resources/agents/versions.py diff --git a/src/gradientai/resources/chat/__init__.py b/src/do_gradientai/resources/chat/__init__.py similarity index 100% rename from src/gradientai/resources/chat/__init__.py rename to src/do_gradientai/resources/chat/__init__.py diff --git a/src/gradientai/resources/chat/chat.py b/src/do_gradientai/resources/chat/chat.py similarity index 100% rename from src/gradientai/resources/chat/chat.py rename to src/do_gradientai/resources/chat/chat.py diff --git a/src/gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py similarity index 100% rename from src/gradientai/resources/chat/completions.py rename to src/do_gradientai/resources/chat/completions.py diff --git a/src/gradientai/resources/gpu_droplets/__init__.py b/src/do_gradientai/resources/gpu_droplets/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/__init__.py rename to src/do_gradientai/resources/gpu_droplets/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/__init__.py b/src/do_gradientai/resources/gpu_droplets/account/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/__init__.py rename to 
src/do_gradientai/resources/gpu_droplets/account/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/account.py b/src/do_gradientai/resources/gpu_droplets/account/account.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/account.py rename to src/do_gradientai/resources/gpu_droplets/account/account.py diff --git a/src/gradientai/resources/gpu_droplets/account/keys.py b/src/do_gradientai/resources/gpu_droplets/account/keys.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/keys.py rename to src/do_gradientai/resources/gpu_droplets/account/keys.py diff --git a/src/gradientai/resources/gpu_droplets/actions.py b/src/do_gradientai/resources/gpu_droplets/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/actions.py rename to src/do_gradientai/resources/gpu_droplets/actions.py diff --git a/src/gradientai/resources/gpu_droplets/autoscale.py b/src/do_gradientai/resources/gpu_droplets/autoscale.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/autoscale.py rename to src/do_gradientai/resources/gpu_droplets/autoscale.py diff --git a/src/gradientai/resources/gpu_droplets/backups.py b/src/do_gradientai/resources/gpu_droplets/backups.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/backups.py rename to src/do_gradientai/resources/gpu_droplets/backups.py diff --git a/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py rename to src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/__init__.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/droplets.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/firewalls.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/rules.py b/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/rules.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/rules.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/tags.py b/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/tags.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/tags.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/__init__.py rename to 
src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/actions.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py diff --git a/src/gradientai/resources/gpu_droplets/gpu_droplets.py b/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/gpu_droplets.py rename to src/do_gradientai/resources/gpu_droplets/gpu_droplets.py diff --git a/src/gradientai/resources/gpu_droplets/images/__init__.py b/src/do_gradientai/resources/gpu_droplets/images/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/__init__.py rename to src/do_gradientai/resources/gpu_droplets/images/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/images/actions.py b/src/do_gradientai/resources/gpu_droplets/images/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/actions.py rename to src/do_gradientai/resources/gpu_droplets/images/actions.py diff --git a/src/gradientai/resources/gpu_droplets/images/images.py b/src/do_gradientai/resources/gpu_droplets/images/images.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/images.py rename to src/do_gradientai/resources/gpu_droplets/images/images.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/__init__.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/droplets.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py diff --git a/src/gradientai/resources/gpu_droplets/sizes.py b/src/do_gradientai/resources/gpu_droplets/sizes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/sizes.py rename to src/do_gradientai/resources/gpu_droplets/sizes.py diff --git a/src/gradientai/resources/gpu_droplets/snapshots.py 
b/src/do_gradientai/resources/gpu_droplets/snapshots.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/__init__.py b/src/do_gradientai/resources/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/__init__.py rename to src/do_gradientai/resources/gpu_droplets/volumes/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/actions.py b/src/do_gradientai/resources/gpu_droplets/volumes/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/actions.py rename to src/do_gradientai/resources/gpu_droplets/volumes/actions.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/volumes.py b/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/volumes.py rename to src/do_gradientai/resources/gpu_droplets/volumes/volumes.py diff --git a/src/gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py similarity index 100% rename from src/gradientai/resources/inference/__init__.py rename to src/do_gradientai/resources/inference/__init__.py diff --git a/src/gradientai/resources/inference/api_keys.py b/src/do_gradientai/resources/inference/api_keys.py similarity index 100% rename from src/gradientai/resources/inference/api_keys.py rename to src/do_gradientai/resources/inference/api_keys.py diff --git a/src/gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py similarity index 100% rename from src/gradientai/resources/inference/inference.py rename to src/do_gradientai/resources/inference/inference.py diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/__init__.py rename to src/do_gradientai/resources/knowledge_bases/__init__.py diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/do_gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/data_sources.py rename to src/do_gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/do_gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/do_gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py similarity index 100% rename from src/gradientai/resources/models/__init__.py rename to 
src/do_gradientai/resources/models/__init__.py diff --git a/src/gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py similarity index 100% rename from src/gradientai/resources/models/models.py rename to src/do_gradientai/resources/models/models.py diff --git a/src/gradientai/resources/models/providers/__init__.py b/src/do_gradientai/resources/models/providers/__init__.py similarity index 100% rename from src/gradientai/resources/models/providers/__init__.py rename to src/do_gradientai/resources/models/providers/__init__.py diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/do_gradientai/resources/models/providers/anthropic.py similarity index 100% rename from src/gradientai/resources/models/providers/anthropic.py rename to src/do_gradientai/resources/models/providers/anthropic.py diff --git a/src/gradientai/resources/models/providers/openai.py b/src/do_gradientai/resources/models/providers/openai.py similarity index 100% rename from src/gradientai/resources/models/providers/openai.py rename to src/do_gradientai/resources/models/providers/openai.py diff --git a/src/gradientai/resources/models/providers/providers.py b/src/do_gradientai/resources/models/providers/providers.py similarity index 100% rename from src/gradientai/resources/models/providers/providers.py rename to src/do_gradientai/resources/models/providers/providers.py diff --git a/src/gradientai/resources/regions.py b/src/do_gradientai/resources/regions.py similarity index 100% rename from src/gradientai/resources/regions.py rename to src/do_gradientai/resources/regions.py diff --git a/src/gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py similarity index 100% rename from src/gradientai/types/__init__.py rename to src/do_gradientai/types/__init__.py diff --git a/src/gradientai/types/agent_create_params.py b/src/do_gradientai/types/agent_create_params.py similarity index 100% rename from src/gradientai/types/agent_create_params.py rename to src/do_gradientai/types/agent_create_params.py diff --git a/src/gradientai/types/agent_create_response.py b/src/do_gradientai/types/agent_create_response.py similarity index 100% rename from src/gradientai/types/agent_create_response.py rename to src/do_gradientai/types/agent_create_response.py diff --git a/src/gradientai/types/agent_delete_response.py b/src/do_gradientai/types/agent_delete_response.py similarity index 100% rename from src/gradientai/types/agent_delete_response.py rename to src/do_gradientai/types/agent_delete_response.py diff --git a/src/gradientai/types/agent_list_params.py b/src/do_gradientai/types/agent_list_params.py similarity index 100% rename from src/gradientai/types/agent_list_params.py rename to src/do_gradientai/types/agent_list_params.py diff --git a/src/gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py similarity index 100% rename from src/gradientai/types/agent_list_response.py rename to src/do_gradientai/types/agent_list_response.py diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/do_gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/gradientai/types/agent_retrieve_response.py rename to src/do_gradientai/types/agent_retrieve_response.py diff --git a/src/gradientai/types/agent_update_params.py b/src/do_gradientai/types/agent_update_params.py similarity index 100% rename from src/gradientai/types/agent_update_params.py rename to src/do_gradientai/types/agent_update_params.py diff --git 
a/src/gradientai/types/agent_update_response.py b/src/do_gradientai/types/agent_update_response.py similarity index 100% rename from src/gradientai/types/agent_update_response.py rename to src/do_gradientai/types/agent_update_response.py diff --git a/src/gradientai/types/agent_update_status_params.py b/src/do_gradientai/types/agent_update_status_params.py similarity index 100% rename from src/gradientai/types/agent_update_status_params.py rename to src/do_gradientai/types/agent_update_status_params.py diff --git a/src/gradientai/types/agent_update_status_response.py b/src/do_gradientai/types/agent_update_status_response.py similarity index 100% rename from src/gradientai/types/agent_update_status_response.py rename to src/do_gradientai/types/agent_update_status_response.py diff --git a/src/gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to src/do_gradientai/types/agents/__init__.py diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/do_gradientai/types/agents/api_evaluation_metric.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric.py rename to src/do_gradientai/types/agents/api_evaluation_metric.py diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/do_gradientai/types/agents/api_evaluation_metric_result.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric_result.py rename to src/do_gradientai/types/agents/api_evaluation_metric_result.py diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/do_gradientai/types/agents/api_evaluation_prompt.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_prompt.py rename to src/do_gradientai/types/agents/api_evaluation_prompt.py diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_run.py rename to src/do_gradientai/types/agents/api_evaluation_run.py diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/do_gradientai/types/agents/api_evaluation_test_case.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_test_case.py rename to src/do_gradientai/types/agents/api_evaluation_test_case.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/do_gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/do_gradientai/types/agents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/do_gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to src/do_gradientai/types/agents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/do_gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/do_gradientai/types/agents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/do_gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/do_gradientai/types/agents/api_key_list_params.py diff --git 
a/src/gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/do_gradientai/types/agents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/do_gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/do_gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/do_gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_params.py rename to src/do_gradientai/types/agents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/do_gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/do_gradientai/types/agents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/do_gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to src/do_gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/do_gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric.py rename to src/do_gradientai/types/agents/api_star_metric.py diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/do_gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric_param.py rename to src/do_gradientai/types/agents/api_star_metric_param.py diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/do_gradientai/types/agents/chat/__init__.py similarity index 100% rename from src/gradientai/types/agents/chat/__init__.py rename to src/do_gradientai/types/agents/chat/__init__.py diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/do_gradientai/types/agents/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_params.py rename to src/do_gradientai/types/agents/chat/completion_create_params.py diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/do_gradientai/types/agents/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_response.py rename to src/do_gradientai/types/agents/chat/completion_create_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from 
src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_params.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/types/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py rename to 
src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py diff --git 
a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_create_params.py rename to src/do_gradientai/types/agents/evaluation_run_create_params.py diff --git a/src/gradientai/types/agents/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_create_response.py rename to src/do_gradientai/types/agents/evaluation_run_create_response.py diff --git a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/do_gradientai/types/agents/evaluation_run_list_results_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_list_results_params.py rename to src/do_gradientai/types/agents/evaluation_run_list_results_params.py diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_list_results_response.py rename to src/do_gradientai/types/agents/evaluation_run_list_results_response.py diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_run_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_retrieve_results_response.py rename to src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_create_params.py b/src/do_gradientai/types/agents/evaluation_test_case_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_create_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_create_response.py b/src/do_gradientai/types/agents/evaluation_test_case_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_create_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py 
b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_retrieve_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/do_gradientai/types/agents/evaluation_test_case_update_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_update_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_update_response.py b/src/do_gradientai/types/agents/evaluation_test_case_update_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_update_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_response.py diff --git a/src/gradientai/types/agents/function_create_params.py b/src/do_gradientai/types/agents/function_create_params.py similarity index 100% rename from src/gradientai/types/agents/function_create_params.py rename to src/do_gradientai/types/agents/function_create_params.py diff --git a/src/gradientai/types/agents/function_create_response.py b/src/do_gradientai/types/agents/function_create_response.py similarity index 100% rename from src/gradientai/types/agents/function_create_response.py rename to src/do_gradientai/types/agents/function_create_response.py diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/do_gradientai/types/agents/function_delete_response.py similarity index 100% rename from src/gradientai/types/agents/function_delete_response.py rename to src/do_gradientai/types/agents/function_delete_response.py diff --git a/src/gradientai/types/agents/function_update_params.py b/src/do_gradientai/types/agents/function_update_params.py similarity index 100% rename from src/gradientai/types/agents/function_update_params.py rename to src/do_gradientai/types/agents/function_update_params.py diff --git 
a/src/gradientai/types/agents/function_update_response.py b/src/do_gradientai/types/agents/function_update_response.py similarity index 100% rename from src/gradientai/types/agents/function_update_response.py rename to src/do_gradientai/types/agents/function_update_response.py diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/do_gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/agents/knowledge_base_detach_response.py rename to src/do_gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/agents/route_add_params.py b/src/do_gradientai/types/agents/route_add_params.py similarity index 100% rename from src/gradientai/types/agents/route_add_params.py rename to src/do_gradientai/types/agents/route_add_params.py diff --git a/src/gradientai/types/agents/route_add_response.py b/src/do_gradientai/types/agents/route_add_response.py similarity index 100% rename from src/gradientai/types/agents/route_add_response.py rename to src/do_gradientai/types/agents/route_add_response.py diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/do_gradientai/types/agents/route_delete_response.py similarity index 100% rename from src/gradientai/types/agents/route_delete_response.py rename to src/do_gradientai/types/agents/route_delete_response.py diff --git a/src/gradientai/types/agents/route_update_params.py b/src/do_gradientai/types/agents/route_update_params.py similarity index 100% rename from src/gradientai/types/agents/route_update_params.py rename to src/do_gradientai/types/agents/route_update_params.py diff --git a/src/gradientai/types/agents/route_update_response.py b/src/do_gradientai/types/agents/route_update_response.py similarity index 100% rename from src/gradientai/types/agents/route_update_response.py rename to src/do_gradientai/types/agents/route_update_response.py diff --git a/src/gradientai/types/agents/route_view_response.py b/src/do_gradientai/types/agents/route_view_response.py similarity index 100% rename from src/gradientai/types/agents/route_view_response.py rename to src/do_gradientai/types/agents/route_view_response.py diff --git a/src/gradientai/types/agents/version_list_params.py b/src/do_gradientai/types/agents/version_list_params.py similarity index 100% rename from src/gradientai/types/agents/version_list_params.py rename to src/do_gradientai/types/agents/version_list_params.py diff --git a/src/gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py similarity index 100% rename from src/gradientai/types/agents/version_list_response.py rename to src/do_gradientai/types/agents/version_list_response.py diff --git a/src/gradientai/types/agents/version_update_params.py b/src/do_gradientai/types/agents/version_update_params.py similarity index 100% rename from src/gradientai/types/agents/version_update_params.py rename to src/do_gradientai/types/agents/version_update_params.py diff --git a/src/gradientai/types/agents/version_update_response.py b/src/do_gradientai/types/agents/version_update_response.py similarity index 100% rename from src/gradientai/types/agents/version_update_response.py rename to src/do_gradientai/types/agents/version_update_response.py diff --git a/src/gradientai/types/api_agent.py b/src/do_gradientai/types/api_agent.py similarity index 100% rename from src/gradientai/types/api_agent.py rename to src/do_gradientai/types/api_agent.py diff --git 
a/src/gradientai/types/api_agent_api_key_info.py b/src/do_gradientai/types/api_agent_api_key_info.py similarity index 100% rename from src/gradientai/types/api_agent_api_key_info.py rename to src/do_gradientai/types/api_agent_api_key_info.py diff --git a/src/gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py similarity index 100% rename from src/gradientai/types/api_agent_model.py rename to src/do_gradientai/types/api_agent_model.py diff --git a/src/gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py similarity index 100% rename from src/gradientai/types/api_agreement.py rename to src/do_gradientai/types/api_agreement.py diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/do_gradientai/types/api_anthropic_api_key_info.py similarity index 100% rename from src/gradientai/types/api_anthropic_api_key_info.py rename to src/do_gradientai/types/api_anthropic_api_key_info.py diff --git a/src/gradientai/types/api_deployment_visibility.py b/src/do_gradientai/types/api_deployment_visibility.py similarity index 100% rename from src/gradientai/types/api_deployment_visibility.py rename to src/do_gradientai/types/api_deployment_visibility.py diff --git a/src/gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py similarity index 100% rename from src/gradientai/types/api_knowledge_base.py rename to src/do_gradientai/types/api_knowledge_base.py diff --git a/src/gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py similarity index 100% rename from src/gradientai/types/api_model.py rename to src/do_gradientai/types/api_model.py diff --git a/src/gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py similarity index 100% rename from src/gradientai/types/api_model_version.py rename to src/do_gradientai/types/api_model_version.py diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/do_gradientai/types/api_openai_api_key_info.py similarity index 100% rename from src/gradientai/types/api_openai_api_key_info.py rename to src/do_gradientai/types/api_openai_api_key_info.py diff --git a/src/gradientai/types/api_retrieval_method.py b/src/do_gradientai/types/api_retrieval_method.py similarity index 100% rename from src/gradientai/types/api_retrieval_method.py rename to src/do_gradientai/types/api_retrieval_method.py diff --git a/src/gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py similarity index 100% rename from src/gradientai/types/api_workspace.py rename to src/do_gradientai/types/api_workspace.py diff --git a/src/gradientai/types/chat/__init__.py b/src/do_gradientai/types/chat/__init__.py similarity index 100% rename from src/gradientai/types/chat/__init__.py rename to src/do_gradientai/types/chat/__init__.py diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/do_gradientai/types/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/chat/completion_create_params.py rename to src/do_gradientai/types/chat/completion_create_params.py diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/do_gradientai/types/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/chat/completion_create_response.py rename to src/do_gradientai/types/chat/completion_create_response.py diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/do_gradientai/types/droplet_backup_policy.py similarity index 100% rename from 
src/gradientai/types/droplet_backup_policy.py rename to src/do_gradientai/types/droplet_backup_policy.py diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/do_gradientai/types/droplet_backup_policy_param.py similarity index 100% rename from src/gradientai/types/droplet_backup_policy_param.py rename to src/do_gradientai/types/droplet_backup_policy_param.py diff --git a/src/gradientai/types/gpu_droplet_create_params.py b/src/do_gradientai/types/gpu_droplet_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_create_params.py rename to src/do_gradientai/types/gpu_droplet_create_params.py diff --git a/src/gradientai/types/gpu_droplet_create_response.py b/src/do_gradientai/types/gpu_droplet_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_create_response.py rename to src/do_gradientai/types/gpu_droplet_create_response.py diff --git a/src/gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_delete_by_tag_params.py rename to src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_params.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_firewalls_params.py rename to src/do_gradientai/types/gpu_droplet_list_firewalls_params.py diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_response.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_firewalls_response.py rename to src/do_gradientai/types/gpu_droplet_list_firewalls_response.py diff --git a/src/gradientai/types/gpu_droplet_list_kernels_params.py b/src/do_gradientai/types/gpu_droplet_list_kernels_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_kernels_params.py rename to src/do_gradientai/types/gpu_droplet_list_kernels_params.py diff --git a/src/gradientai/types/gpu_droplet_list_kernels_response.py b/src/do_gradientai/types/gpu_droplet_list_kernels_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_kernels_response.py rename to src/do_gradientai/types/gpu_droplet_list_kernels_response.py diff --git a/src/gradientai/types/gpu_droplet_list_neighbors_response.py b/src/do_gradientai/types/gpu_droplet_list_neighbors_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_neighbors_response.py rename to src/do_gradientai/types/gpu_droplet_list_neighbors_response.py diff --git a/src/gradientai/types/gpu_droplet_list_params.py b/src/do_gradientai/types/gpu_droplet_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_params.py rename to src/do_gradientai/types/gpu_droplet_list_params.py diff --git a/src/gradientai/types/gpu_droplet_list_response.py b/src/do_gradientai/types/gpu_droplet_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_response.py rename to src/do_gradientai/types/gpu_droplet_list_response.py diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_params.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_snapshots_params.py rename to src/do_gradientai/types/gpu_droplet_list_snapshots_params.py diff --git 
a/src/gradientai/types/gpu_droplet_list_snapshots_response.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_snapshots_response.py rename to src/do_gradientai/types/gpu_droplet_list_snapshots_response.py diff --git a/src/gradientai/types/gpu_droplet_retrieve_response.py b/src/do_gradientai/types/gpu_droplet_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_retrieve_response.py rename to src/do_gradientai/types/gpu_droplet_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/__init__.py b/src/do_gradientai/types/gpu_droplets/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/__init__.py rename to src/do_gradientai/types/gpu_droplets/__init__.py diff --git a/src/gradientai/types/gpu_droplets/account/__init__.py b/src/do_gradientai/types/gpu_droplets/account/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/__init__.py rename to src/do_gradientai/types/gpu_droplets/account/__init__.py diff --git a/src/gradientai/types/gpu_droplets/account/key_create_params.py b/src/do_gradientai/types/gpu_droplets/account/key_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_create_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_create_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_create_response.py b/src/do_gradientai/types/gpu_droplets/account/key_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_create_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_create_response.py diff --git a/src/gradientai/types/gpu_droplets/account/key_list_params.py b/src/do_gradientai/types/gpu_droplets/account/key_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_list_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_list_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_list_response.py b/src/do_gradientai/types/gpu_droplets/account/key_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_list_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_list_response.py diff --git a/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/account/key_update_params.py b/src/do_gradientai/types/gpu_droplets/account/key_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_update_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_update_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_update_response.py b/src/do_gradientai/types/gpu_droplets/account/key_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_update_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_update_response.py diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py diff --git a/src/gradientai/types/gpu_droplets/action_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_initiate_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_initiate_params.py rename to src/do_gradientai/types/gpu_droplets/action_initiate_params.py diff --git a/src/gradientai/types/gpu_droplets/action_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_initiate_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_initiate_response.py rename to src/do_gradientai/types/gpu_droplets/action_initiate_response.py diff --git a/src/gradientai/types/gpu_droplets/action_list_params.py b/src/do_gradientai/types/gpu_droplets/action_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_list_params.py rename to src/do_gradientai/types/gpu_droplets/action_list_params.py diff --git a/src/gradientai/types/gpu_droplets/action_list_response.py b/src/do_gradientai/types/gpu_droplets/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/action_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/associated_resource.py b/src/do_gradientai/types/gpu_droplets/associated_resource.py similarity index 100% rename from src/gradientai/types/gpu_droplets/associated_resource.py rename to src/do_gradientai/types/gpu_droplets/associated_resource.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_create_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_create_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_create_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_create_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_history_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_history_response.py rename to 
src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_members_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_members_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_update_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_update_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_update_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_update_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_params.py rename to src/do_gradientai/types/gpu_droplets/backup_list_params.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_policies_params.py rename to src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_policies_response.py rename to src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_response.py rename to src/do_gradientai/types/gpu_droplets/backup_list_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py rename to src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py rename to src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py diff --git a/src/gradientai/types/gpu_droplets/current_utilization.py b/src/do_gradientai/types/gpu_droplets/current_utilization.py similarity index 100% rename from src/gradientai/types/gpu_droplets/current_utilization.py rename to src/do_gradientai/types/gpu_droplets/current_utilization.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py 
b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py diff --git a/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroyed_associated_resource.py rename to src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py diff --git a/src/gradientai/types/gpu_droplets/domains.py b/src/do_gradientai/types/gpu_droplets/domains.py similarity index 100% rename from src/gradientai/types/gpu_droplets/domains.py rename to src/do_gradientai/types/gpu_droplets/domains.py diff --git a/src/gradientai/types/gpu_droplets/domains_param.py b/src/do_gradientai/types/gpu_droplets/domains_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/domains_param.py rename to src/do_gradientai/types/gpu_droplets/domains_param.py diff --git a/src/gradientai/types/gpu_droplets/firewall.py b/src/do_gradientai/types/gpu_droplets/firewall.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall.py rename to src/do_gradientai/types/gpu_droplets/firewall.py diff --git a/src/gradientai/types/gpu_droplets/firewall_create_params.py b/src/do_gradientai/types/gpu_droplets/firewall_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_create_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_create_params.py diff --git a/src/gradientai/types/gpu_droplets/firewall_create_response.py b/src/do_gradientai/types/gpu_droplets/firewall_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_create_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_create_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_list_params.py b/src/do_gradientai/types/gpu_droplets/firewall_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_list_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_list_params.py diff --git a/src/gradientai/types/gpu_droplets/firewall_list_response.py b/src/do_gradientai/types/gpu_droplets/firewall_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_list_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_list_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_param.py 
b/src/do_gradientai/types/gpu_droplets/firewall_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_param.py rename to src/do_gradientai/types/gpu_droplets/firewall_param.py diff --git a/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_update_params.py b/src/do_gradientai/types/gpu_droplets/firewall_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_update_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_update_params.py diff --git a/src/gradientai/types/gpu_droplets/firewall_update_response.py b/src/do_gradientai/types/gpu_droplets/firewall_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_update_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_update_response.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/types/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/__init__.py rename to src/do_gradientai/types/gpu_droplets/firewalls/__init__.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip.py b/src/do_gradientai/types/gpu_droplets/floating_ip.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/floating_ip.py rename to src/do_gradientai/types/gpu_droplets/floating_ip.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_create_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_create_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_list_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_list_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/__init__.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule.py 
b/src/do_gradientai/types/gpu_droplets/forwarding_rule.py similarity index 100% rename from src/gradientai/types/gpu_droplets/forwarding_rule.py rename to src/do_gradientai/types/gpu_droplets/forwarding_rule.py diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/forwarding_rule_param.py rename to src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py diff --git a/src/gradientai/types/gpu_droplets/glb_settings.py b/src/do_gradientai/types/gpu_droplets/glb_settings.py similarity index 100% rename from src/gradientai/types/gpu_droplets/glb_settings.py rename to src/do_gradientai/types/gpu_droplets/glb_settings.py diff --git a/src/gradientai/types/gpu_droplets/glb_settings_param.py b/src/do_gradientai/types/gpu_droplets/glb_settings_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/glb_settings_param.py rename to src/do_gradientai/types/gpu_droplets/glb_settings_param.py diff --git a/src/gradientai/types/gpu_droplets/health_check.py b/src/do_gradientai/types/gpu_droplets/health_check.py similarity index 100% rename from src/gradientai/types/gpu_droplets/health_check.py rename to src/do_gradientai/types/gpu_droplets/health_check.py diff --git a/src/gradientai/types/gpu_droplets/health_check_param.py b/src/do_gradientai/types/gpu_droplets/health_check_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/health_check_param.py rename to src/do_gradientai/types/gpu_droplets/health_check_param.py diff --git a/src/gradientai/types/gpu_droplets/image_create_params.py b/src/do_gradientai/types/gpu_droplets/image_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_create_params.py rename to src/do_gradientai/types/gpu_droplets/image_create_params.py diff --git a/src/gradientai/types/gpu_droplets/image_create_response.py b/src/do_gradientai/types/gpu_droplets/image_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_create_response.py rename to src/do_gradientai/types/gpu_droplets/image_create_response.py diff --git a/src/gradientai/types/gpu_droplets/image_list_params.py b/src/do_gradientai/types/gpu_droplets/image_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_list_params.py rename to src/do_gradientai/types/gpu_droplets/image_list_params.py diff --git a/src/gradientai/types/gpu_droplets/image_list_response.py b/src/do_gradientai/types/gpu_droplets/image_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_list_response.py rename to src/do_gradientai/types/gpu_droplets/image_list_response.py diff --git a/src/gradientai/types/gpu_droplets/image_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/image_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/image_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/image_update_params.py b/src/do_gradientai/types/gpu_droplets/image_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_update_params.py rename to src/do_gradientai/types/gpu_droplets/image_update_params.py diff --git a/src/gradientai/types/gpu_droplets/image_update_response.py b/src/do_gradientai/types/gpu_droplets/image_update_response.py similarity index 100% 
rename from src/gradientai/types/gpu_droplets/image_update_response.py rename to src/do_gradientai/types/gpu_droplets/image_update_response.py diff --git a/src/gradientai/types/gpu_droplets/images/__init__.py b/src/do_gradientai/types/gpu_droplets/images/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/__init__.py rename to src/do_gradientai/types/gpu_droplets/images/__init__.py diff --git a/src/gradientai/types/gpu_droplets/images/action_create_params.py b/src/do_gradientai/types/gpu_droplets/images/action_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/action_create_params.py rename to src/do_gradientai/types/gpu_droplets/images/action_create_params.py diff --git a/src/gradientai/types/gpu_droplets/images/action_list_response.py b/src/do_gradientai/types/gpu_droplets/images/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/images/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/lb_firewall.py b/src/do_gradientai/types/gpu_droplets/lb_firewall.py similarity index 100% rename from src/gradientai/types/gpu_droplets/lb_firewall.py rename to src/do_gradientai/types/gpu_droplets/lb_firewall.py diff --git a/src/gradientai/types/gpu_droplets/lb_firewall_param.py b/src/do_gradientai/types/gpu_droplets/lb_firewall_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/lb_firewall_param.py rename to src/do_gradientai/types/gpu_droplets/lb_firewall_param.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer.py b/src/do_gradientai/types/gpu_droplets/load_balancer.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer.py rename to src/do_gradientai/types/gpu_droplets/load_balancer.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_create_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_create_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_list_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_list_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py diff --git 
a/src/gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_update_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_update_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/__init__.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/size_list_params.py b/src/do_gradientai/types/gpu_droplets/size_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/size_list_params.py rename to src/do_gradientai/types/gpu_droplets/size_list_params.py diff --git a/src/gradientai/types/gpu_droplets/size_list_response.py b/src/do_gradientai/types/gpu_droplets/size_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/size_list_response.py rename to src/do_gradientai/types/gpu_droplets/size_list_response.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_list_params.py rename to src/do_gradientai/types/gpu_droplets/snapshot_list_params.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_list_response.py rename to 
src/do_gradientai/types/gpu_droplets/snapshot_list_response.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions.py similarity index 100% rename from src/gradientai/types/gpu_droplets/sticky_sessions.py rename to src/do_gradientai/types/gpu_droplets/sticky_sessions.py diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/sticky_sessions_param.py rename to src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py diff --git a/src/gradientai/types/gpu_droplets/volume_create_params.py b/src/do_gradientai/types/gpu_droplets/volume_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_create_params.py rename to src/do_gradientai/types/gpu_droplets/volume_create_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_create_response.py b/src/do_gradientai/types/gpu_droplets/volume_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_create_response.py rename to src/do_gradientai/types/gpu_droplets/volume_create_response.py diff --git a/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py rename to src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_list_params.py b/src/do_gradientai/types/gpu_droplets/volume_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_list_params.py rename to src/do_gradientai/types/gpu_droplets/volume_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_list_response.py b/src/do_gradientai/types/gpu_droplets/volume_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_list_response.py rename to src/do_gradientai/types/gpu_droplets/volume_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/__init__.py b/src/do_gradientai/types/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/__init__.py rename to src/do_gradientai/types/gpu_droplets/volumes/__init__.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py 
b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_list_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py similarity index 100% rename 
from src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/volume_action.py b/src/do_gradientai/types/gpu_droplets/volumes/volume_action.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/volume_action.py rename to src/do_gradientai/types/gpu_droplets/volumes/volume_action.py diff --git a/src/gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py similarity index 100% rename from src/gradientai/types/inference/__init__.py rename to src/do_gradientai/types/inference/__init__.py diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/do_gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_params.py rename to src/do_gradientai/types/inference/api_key_create_params.py diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/do_gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_response.py rename to src/do_gradientai/types/inference/api_key_create_response.py diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/do_gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_delete_response.py rename to src/do_gradientai/types/inference/api_key_delete_response.py diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/do_gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_list_params.py rename to src/do_gradientai/types/inference/api_key_list_params.py diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_list_response.py rename to src/do_gradientai/types/inference/api_key_list_response.py diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/do_gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_params.py rename to src/do_gradientai/types/inference/api_key_update_params.py diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/do_gradientai/types/inference/api_key_update_regenerate_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_regenerate_response.py rename to src/do_gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/do_gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_response.py rename to src/do_gradientai/types/inference/api_key_update_response.py diff --git a/src/gradientai/types/inference/api_model_api_key_info.py 
b/src/do_gradientai/types/inference/api_model_api_key_info.py similarity index 100% rename from src/gradientai/types/inference/api_model_api_key_info.py rename to src/do_gradientai/types/inference/api_model_api_key_info.py diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/do_gradientai/types/knowledge_base_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_params.py rename to src/do_gradientai/types/knowledge_base_create_params.py diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/do_gradientai/types/knowledge_base_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_response.py rename to src/do_gradientai/types/knowledge_base_create_response.py diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/do_gradientai/types/knowledge_base_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_delete_response.py rename to src/do_gradientai/types/knowledge_base_delete_response.py diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/do_gradientai/types/knowledge_base_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_params.py rename to src/do_gradientai/types/knowledge_base_list_params.py diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_response.py rename to src/do_gradientai/types/knowledge_base_list_response.py diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/do_gradientai/types/knowledge_base_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_retrieve_response.py rename to src/do_gradientai/types/knowledge_base_retrieve_response.py diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/do_gradientai/types/knowledge_base_update_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_update_params.py rename to src/do_gradientai/types/knowledge_base_update_params.py diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/do_gradientai/types/knowledge_base_update_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_update_response.py rename to src/do_gradientai/types/knowledge_base_update_response.py diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/types/knowledge_bases/__init__.py rename to src/do_gradientai/types/knowledge_bases/__init__.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py 
b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_indexed_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_indexing_job.py rename to src/do_gradientai/types/knowledge_bases/api_indexing_job.py diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/aws_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/aws_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/do_gradientai/types/knowledge_bases/data_source_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/do_gradientai/types/knowledge_bases/data_source_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_delete_response.py rename to 
src/do_gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/do_gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py diff --git 
a/src/gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py similarity index 100% rename from src/gradientai/types/model_list_response.py rename to src/do_gradientai/types/model_list_response.py diff --git a/src/gradientai/types/model_retrieve_response.py b/src/do_gradientai/types/model_retrieve_response.py similarity index 100% rename from src/gradientai/types/model_retrieve_response.py rename to src/do_gradientai/types/model_retrieve_response.py diff --git a/src/gradientai/types/models/__init__.py b/src/do_gradientai/types/models/__init__.py similarity index 100% rename from src/gradientai/types/models/__init__.py rename to src/do_gradientai/types/models/__init__.py diff --git a/src/gradientai/types/models/providers/__init__.py b/src/do_gradientai/types/models/providers/__init__.py similarity index 100% rename from src/gradientai/types/models/providers/__init__.py rename to src/do_gradientai/types/models/providers/__init__.py diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/do_gradientai/types/models/providers/anthropic_create_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_create_params.py rename to src/do_gradientai/types/models/providers/anthropic_create_params.py diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py b/src/do_gradientai/types/models/providers/anthropic_create_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_create_response.py rename to src/do_gradientai/types/models/providers/anthropic_create_response.py diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/do_gradientai/types/models/providers/anthropic_delete_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_delete_response.py rename to src/do_gradientai/types/models/providers/anthropic_delete_response.py diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_agents_params.py rename to src/do_gradientai/types/models/providers/anthropic_list_agents_params.py diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_agents_response.py rename to src/do_gradientai/types/models/providers/anthropic_list_agents_response.py diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/do_gradientai/types/models/providers/anthropic_list_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_params.py rename to src/do_gradientai/types/models/providers/anthropic_list_params.py diff --git a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/do_gradientai/types/models/providers/anthropic_list_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_response.py rename to src/do_gradientai/types/models/providers/anthropic_list_response.py diff --git a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py similarity index 100% rename from 
src/gradientai/types/models/providers/anthropic_retrieve_response.py rename to src/do_gradientai/types/models/providers/anthropic_retrieve_response.py diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/do_gradientai/types/models/providers/anthropic_update_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_update_params.py rename to src/do_gradientai/types/models/providers/anthropic_update_params.py diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/do_gradientai/types/models/providers/anthropic_update_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_update_response.py rename to src/do_gradientai/types/models/providers/anthropic_update_response.py diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/do_gradientai/types/models/providers/openai_create_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_create_params.py rename to src/do_gradientai/types/models/providers/openai_create_params.py diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/do_gradientai/types/models/providers/openai_create_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_create_response.py rename to src/do_gradientai/types/models/providers/openai_create_response.py diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/do_gradientai/types/models/providers/openai_delete_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_delete_response.py rename to src/do_gradientai/types/models/providers/openai_delete_response.py diff --git a/src/gradientai/types/models/providers/openai_list_params.py b/src/do_gradientai/types/models/providers/openai_list_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_list_params.py rename to src/do_gradientai/types/models/providers/openai_list_params.py diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/do_gradientai/types/models/providers/openai_list_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_list_response.py rename to src/do_gradientai/types/models/providers/openai_list_response.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_agents_params.py rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_agents_response.py rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_response.py rename to src/do_gradientai/types/models/providers/openai_retrieve_response.py diff --git a/src/gradientai/types/models/providers/openai_update_params.py 
b/src/do_gradientai/types/models/providers/openai_update_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_update_params.py rename to src/do_gradientai/types/models/providers/openai_update_params.py diff --git a/src/gradientai/types/models/providers/openai_update_response.py b/src/do_gradientai/types/models/providers/openai_update_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_update_response.py rename to src/do_gradientai/types/models/providers/openai_update_response.py diff --git a/src/gradientai/types/region_list_params.py b/src/do_gradientai/types/region_list_params.py similarity index 100% rename from src/gradientai/types/region_list_params.py rename to src/do_gradientai/types/region_list_params.py diff --git a/src/gradientai/types/region_list_response.py b/src/do_gradientai/types/region_list_response.py similarity index 100% rename from src/gradientai/types/region_list_response.py rename to src/do_gradientai/types/region_list_response.py diff --git a/src/gradientai/types/shared/__init__.py b/src/do_gradientai/types/shared/__init__.py similarity index 100% rename from src/gradientai/types/shared/__init__.py rename to src/do_gradientai/types/shared/__init__.py diff --git a/src/gradientai/types/shared/action.py b/src/do_gradientai/types/shared/action.py similarity index 100% rename from src/gradientai/types/shared/action.py rename to src/do_gradientai/types/shared/action.py diff --git a/src/gradientai/types/shared/action_link.py b/src/do_gradientai/types/shared/action_link.py similarity index 100% rename from src/gradientai/types/shared/action_link.py rename to src/do_gradientai/types/shared/action_link.py diff --git a/src/gradientai/types/shared/api_links.py b/src/do_gradientai/types/shared/api_links.py similarity index 100% rename from src/gradientai/types/shared/api_links.py rename to src/do_gradientai/types/shared/api_links.py diff --git a/src/gradientai/types/shared/api_meta.py b/src/do_gradientai/types/shared/api_meta.py similarity index 100% rename from src/gradientai/types/shared/api_meta.py rename to src/do_gradientai/types/shared/api_meta.py diff --git a/src/gradientai/types/shared/backward_links.py b/src/do_gradientai/types/shared/backward_links.py similarity index 100% rename from src/gradientai/types/shared/backward_links.py rename to src/do_gradientai/types/shared/backward_links.py diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/do_gradientai/types/shared/chat_completion_chunk.py similarity index 100% rename from src/gradientai/types/shared/chat_completion_chunk.py rename to src/do_gradientai/types/shared/chat_completion_chunk.py diff --git a/src/gradientai/types/shared/chat_completion_token_logprob.py b/src/do_gradientai/types/shared/chat_completion_token_logprob.py similarity index 100% rename from src/gradientai/types/shared/chat_completion_token_logprob.py rename to src/do_gradientai/types/shared/chat_completion_token_logprob.py diff --git a/src/gradientai/types/shared/completion_usage.py b/src/do_gradientai/types/shared/completion_usage.py similarity index 100% rename from src/gradientai/types/shared/completion_usage.py rename to src/do_gradientai/types/shared/completion_usage.py diff --git a/src/gradientai/types/shared/disk_info.py b/src/do_gradientai/types/shared/disk_info.py similarity index 100% rename from src/gradientai/types/shared/disk_info.py rename to src/do_gradientai/types/shared/disk_info.py diff --git a/src/gradientai/types/shared/droplet.py 
b/src/do_gradientai/types/shared/droplet.py similarity index 100% rename from src/gradientai/types/shared/droplet.py rename to src/do_gradientai/types/shared/droplet.py diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/do_gradientai/types/shared/droplet_next_backup_window.py similarity index 100% rename from src/gradientai/types/shared/droplet_next_backup_window.py rename to src/do_gradientai/types/shared/droplet_next_backup_window.py diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/do_gradientai/types/shared/firewall_rule_target.py similarity index 100% rename from src/gradientai/types/shared/firewall_rule_target.py rename to src/do_gradientai/types/shared/firewall_rule_target.py diff --git a/src/gradientai/types/shared/forward_links.py b/src/do_gradientai/types/shared/forward_links.py similarity index 100% rename from src/gradientai/types/shared/forward_links.py rename to src/do_gradientai/types/shared/forward_links.py diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/do_gradientai/types/shared/garbage_collection.py similarity index 100% rename from src/gradientai/types/shared/garbage_collection.py rename to src/do_gradientai/types/shared/garbage_collection.py diff --git a/src/gradientai/types/shared/gpu_info.py b/src/do_gradientai/types/shared/gpu_info.py similarity index 100% rename from src/gradientai/types/shared/gpu_info.py rename to src/do_gradientai/types/shared/gpu_info.py diff --git a/src/gradientai/types/shared/image.py b/src/do_gradientai/types/shared/image.py similarity index 100% rename from src/gradientai/types/shared/image.py rename to src/do_gradientai/types/shared/image.py diff --git a/src/gradientai/types/shared/kernel.py b/src/do_gradientai/types/shared/kernel.py similarity index 100% rename from src/gradientai/types/shared/kernel.py rename to src/do_gradientai/types/shared/kernel.py diff --git a/src/gradientai/types/shared/meta_properties.py b/src/do_gradientai/types/shared/meta_properties.py similarity index 100% rename from src/gradientai/types/shared/meta_properties.py rename to src/do_gradientai/types/shared/meta_properties.py diff --git a/src/gradientai/types/shared/network_v4.py b/src/do_gradientai/types/shared/network_v4.py similarity index 100% rename from src/gradientai/types/shared/network_v4.py rename to src/do_gradientai/types/shared/network_v4.py diff --git a/src/gradientai/types/shared/network_v6.py b/src/do_gradientai/types/shared/network_v6.py similarity index 100% rename from src/gradientai/types/shared/network_v6.py rename to src/do_gradientai/types/shared/network_v6.py diff --git a/src/gradientai/types/shared/page_links.py b/src/do_gradientai/types/shared/page_links.py similarity index 100% rename from src/gradientai/types/shared/page_links.py rename to src/do_gradientai/types/shared/page_links.py diff --git a/src/gradientai/types/shared/region.py b/src/do_gradientai/types/shared/region.py similarity index 100% rename from src/gradientai/types/shared/region.py rename to src/do_gradientai/types/shared/region.py diff --git a/src/gradientai/types/shared/size.py b/src/do_gradientai/types/shared/size.py similarity index 100% rename from src/gradientai/types/shared/size.py rename to src/do_gradientai/types/shared/size.py diff --git a/src/gradientai/types/shared/snapshots.py b/src/do_gradientai/types/shared/snapshots.py similarity index 100% rename from src/gradientai/types/shared/snapshots.py rename to src/do_gradientai/types/shared/snapshots.py diff --git 
similarity index 100%
rename from src/gradientai/types/shared/subscription.py
rename to src/do_gradientai/types/shared/subscription.py
diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/do_gradientai/types/shared/subscription_tier_base.py
similarity index 100%
rename from src/gradientai/types/shared/subscription_tier_base.py
rename to src/do_gradientai/types/shared/subscription_tier_base.py
diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/do_gradientai/types/shared/vpc_peering.py
similarity index 100%
rename from src/gradientai/types/shared/vpc_peering.py
rename to src/do_gradientai/types/shared/vpc_peering.py
diff --git a/src/gradientai/types/shared_params/__init__.py b/src/do_gradientai/types/shared_params/__init__.py
similarity index 100%
rename from src/gradientai/types/shared_params/__init__.py
rename to src/do_gradientai/types/shared_params/__init__.py
diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/do_gradientai/types/shared_params/firewall_rule_target.py
similarity index 100%
rename from src/gradientai/types/shared_params/firewall_rule_target.py
rename to src/do_gradientai/types/shared_params/firewall_rule_target.py
diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py
index dc13cd85..d80b5c09 100644
--- a/tests/api_resources/agents/chat/test_completions.py
+++ b/tests/api_resources/agents/chat/test_completions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.chat import CompletionCreateResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py
index 6b8f8bc7..27ab4a27 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_models.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_models.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics import ModelListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
index ea39c474..2728393e 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics import (
     WorkspaceListResponse,
     WorkspaceCreateResponse,
     WorkspaceDeleteResponse,
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
index 635721b3..37d39018 100644
--- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics.workspaces import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics.workspaces import (
     AgentListResponse,
     AgentMoveResponse,
 )
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
index c29511f5..1e5275fe 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
index 0413591e..56edd598 100644
--- a/tests/api_resources/agents/test_evaluation_datasets.py
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
index d64367ae..303d85d6 100644
--- a/tests/api_resources/agents/test_evaluation_metrics.py
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationMetricListResponse,
     EvaluationMetricListRegionsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index 2ea44e6b..9d443f16 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationRunCreateResponse,
     EvaluationRunRetrieveResponse,
     EvaluationRunListResultsResponse,
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index e9083ba3..ae986abc 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationTestCaseListResponse,
     EvaluationTestCaseCreateResponse,
     EvaluationTestCaseUpdateResponse,
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
index 4390d1d2..624446e0 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/agents/test_functions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionDeleteResponse,
     FunctionUpdateResponse,
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index 2ac20d89..7ac99316 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index d04e8c90..256a4757 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     RouteAddResponse,
     RouteViewResponse,
     RouteDeleteResponse,
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
index d6151470..158856ed 100644
--- a/tests/api_resources/agents/test_versions.py
+++ b/tests/api_resources/agents/test_versions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     VersionListResponse,
     VersionUpdateResponse,
 )
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 46c8b431..95b02106 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.chat import CompletionCreateResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py
index acad3575..cf168f61 100644
--- a/tests/api_resources/gpu_droplets/account/test_keys.py
+++ b/tests/api_resources/gpu_droplets/account/test_keys.py
@@ -7,9 +7,9 @@
 
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets.account import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from
do_gradientai.types.gpu_droplets.account import ( KeyListResponse, KeyCreateResponse, KeyUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py index 67d132aa..819a5e6e 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py index 446a11af..b2eab40c 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py index a0227c61..25c9362b 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py index 82a12d2e..ad26db8a 100644 --- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py +++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.floating_ips import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.floating_ips import ( ActionListResponse, ActionCreateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py index 4d59c85b..35861bcb 100644 --- a/tests/api_resources/gpu_droplets/images/test_actions.py +++ b/tests/api_resources/gpu_droplets/images/test_actions.py @@ -7,10 +7,10 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.shared import Action -from gradientai.types.gpu_droplets.images import ActionListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.shared import Action +from do_gradientai.types.gpu_droplets.images import ActionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py index 333567f4..f22213e2 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", 
"http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py index ec6f7838..d53bd0db 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py index 5e443dd8..74e45b44 100644 --- a/tests/api_resources/gpu_droplets/test_actions.py +++ b/tests/api_resources/gpu_droplets/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( ActionListResponse, ActionInitiateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py index 42164666..cec0371d 100644 --- a/tests/api_resources/gpu_droplets/test_autoscale.py +++ b/tests/api_resources/gpu_droplets/test_autoscale.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( AutoscaleListResponse, AutoscaleCreateResponse, AutoscaleUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py index f8f72140..334c701f 100644 --- a/tests/api_resources/gpu_droplets/test_backups.py +++ b/tests/api_resources/gpu_droplets/test_backups.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupRetrievePolicyResponse, diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py index b6922feb..2aef1fce 100644 --- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py +++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( DestroyWithAssociatedResourceListResponse, DestroyWithAssociatedResourceCheckStatusResponse, ) diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py index 537fe7d2..6d98ebe8 100644 --- a/tests/api_resources/gpu_droplets/test_firewalls.py +++ b/tests/api_resources/gpu_droplets/test_firewalls.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets 
import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( FirewallListResponse, FirewallCreateResponse, FirewallUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py index 830e9b39..9b8b3183 100644 --- a/tests/api_resources/gpu_droplets/test_floating_ips.py +++ b/tests/api_resources/gpu_droplets/test_floating_ips.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( FloatingIPListResponse, FloatingIPCreateResponse, FloatingIPRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py index 7be6a786..5a2a7c0c 100644 --- a/tests/api_resources/gpu_droplets/test_images.py +++ b/tests/api_resources/gpu_droplets/test_images.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( ImageListResponse, ImageCreateResponse, ImageUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py index c1ce1ce2..b96c6d52 100644 --- a/tests/api_resources/gpu_droplets/test_load_balancers.py +++ b/tests/api_resources/gpu_droplets/test_load_balancers.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( LoadBalancerListResponse, LoadBalancerCreateResponse, LoadBalancerUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py index eda73b1e..1ff11cd7 100644 --- a/tests/api_resources/gpu_droplets/test_sizes.py +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import SizeListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import SizeListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py index 5d7132c2..413dd993 100644 --- a/tests/api_resources/gpu_droplets/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py index 64bcb4c5..baf6b430 100644 --- 
a/tests/api_resources/gpu_droplets/test_volumes.py +++ b/tests/api_resources/gpu_droplets/test_volumes.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( VolumeListResponse, VolumeCreateResponse, VolumeRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py index d5338c97..40d9b4eb 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_actions.py +++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.volumes import ( ActionListResponse, ActionRetrieveResponse, ActionInitiateByIDResponse, diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py index 8b72305c..4884d372 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.volumes import ( SnapshotListResponse, SnapshotCreateResponse, SnapshotRetrieveResponse, diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index 157a2e3d..85ad49da 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 55b056b8..ebb0841a 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index ed32d7f8..b0185941 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.knowledge_bases import ( 
IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index c61a97ea..6b3d99a3 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.models.providers import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.models.providers import ( AnthropicListResponse, AnthropicCreateResponse, AnthropicDeleteResponse, diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 7fde1a69..bdde97ca 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.models.providers import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.models.providers import ( OpenAIListResponse, OpenAICreateResponse, OpenAIDeleteResponse, diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 8a6a7d69..2f68a06f 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py index 22f3d2d0..cbc7e63b 100644 --- a/tests/api_resources/test_gpu_droplets.py +++ b/tests/api_resources/test_gpu_droplets.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( GPUDropletListResponse, GPUDropletCreateResponse, GPUDropletRetrieveResponse, diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 8a331b52..c4d179cc 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index fe837973..803c5d5a 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse, ModelRetrieveResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ModelListResponse, ModelRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff 
--git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 4f232293..f331342e 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import RegionListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import RegionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/conftest.py b/tests/conftest.py index 5b24e1c2..1e102b94 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,15 +10,15 @@ import pytest from pytest_asyncio import is_async_test -from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient -from gradientai._utils import is_dict +from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from do_gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("gradientai").setLevel(logging.DEBUG) +logging.getLogger("do_gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests diff --git a/tests/test_client.py b/tests/test_client.py index 61013a0a..85b7d31a 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,12 +21,12 @@ from respx import MockRouter from pydantic import ValidationError -from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError -from gradientai._types import Omit -from gradientai._models import BaseModel, FinalRequestOptions -from gradientai._streaming import Stream, AsyncStream -from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError -from gradientai._base_client import ( +from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from do_gradientai._types import Omit +from do_gradientai._models import BaseModel, FinalRequestOptions +from do_gradientai._streaming import Stream, AsyncStream +from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from do_gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. 
"/logging/__init__.py", ] @@ -873,7 +873,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) @@ -909,7 +909,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -948,7 +948,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. 
- "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1880,7 +1880,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1900,7 +1900,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1920,7 +1920,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1960,7 +1960,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1993,7 +1993,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from gradientai._utils import asyncify - from gradientai._base_client import get_platform + from do_gradientai._utils import asyncify + from do_gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 9d1579a8..5a98ce1b 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from gradientai._utils import deepcopy_minimal +from do_gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py 
b/tests/test_extract_files.py index 2905d59c..341e65ae 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from gradientai._types import FileTypes -from gradientai._utils import extract_files +from do_gradientai._types import FileTypes +from do_gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index 4a723313..ff7914bb 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from gradientai._files import to_httpx_files, async_to_httpx_files +from do_gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 3a857584..bfbef61a 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from gradientai._utils import PropertyInfo -from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from gradientai._models import BaseModel, construct_type +from do_gradientai._utils import PropertyInfo +from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from do_gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index 9080377b..c9213571 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from gradientai._qs import Querystring, stringify +from do_gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index c4e6b9d8..434e9491 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from gradientai._utils import required_args +from do_gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 1a8f241e..001ce776 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from gradientai import BaseModel, GradientAI, AsyncGradientAI -from gradientai._response import ( +from do_gradientai import BaseModel, GradientAI, AsyncGradientAI +from do_gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from gradientai._streaming import Stream -from gradientai._base_client import FinalRequestOptions +from do_gradientai._streaming import Stream +from do_gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from do_gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index cdb41a77..c1ce8e85 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from gradientai import GradientAI, AsyncGradientAI -from gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 825fe048..30c06d6a 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from gradientai._types import NOT_GIVEN, Base64FileInput -from gradientai._utils import ( +from do_gradientai._types import NOT_GIVEN, Base64FileInput +from do_gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from gradientai._compat import PYDANTIC_V2 -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2 +from do_gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 3856b2c9..9ce2e0d3 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from gradientai._utils import LazyProxy +from do_gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 66ad064f..c9129fdc 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from gradientai._utils import extract_type_var_from_base +from do_gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index b539ed2c..9def7c60 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from gradientai._types import Omit, NoneType -from gradientai._utils import ( +from do_gradientai._types import Omit, NoneType +from do_gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from do_gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From 59b26fc32acb749e2992255dea301eeacbf51ca8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:32:33 +0000 Subject: [PATCH 119/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b0699969..08e82c45 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.14" + ".": "0.1.0-alpha.15" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7a93ec8c..18b7e723 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.14" +version = "0.1.0-alpha.15" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index d69cef74..d0c1c939 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.14" # x-release-please-version +__version__ = "0.1.0-alpha.15" # x-release-please-version From 2d8afe33cafd02ad3795eb1e2ebcd66098a26b4a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:52:32 +0000 Subject: [PATCH 120/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 08e82c45..7e56fe29 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.15" + ".": "0.1.0-alpha.16" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 18b7e723..bc38f8e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.15" +version = "0.1.0-alpha.16" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index d0c1c939..314b16ff 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "do_gradientai" -__version__ = "0.1.0-alpha.15" # x-release-please-version +__version__ = "0.1.0-alpha.16" # x-release-please-version From ee8736a49fe3052cab3b4e6a491df8bf4d365308 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 21:52:05 +0000 Subject: [PATCH 121/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7e56fe29..e2f2c074 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.16" + ".": "0.1.0-alpha.17" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index bc38f8e9..a27f98cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.16" +version = "0.1.0-alpha.17" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index 314b16ff..b56314e9 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.16" # x-release-please-version +__version__ = "0.1.0-alpha.17" # x-release-please-version From 1056f586ebaef08d43c1548e409457ffadae53cd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 21:55:37 +0000 Subject: [PATCH 122/200] chore: update pypi package name --- .stats.yml | 2 +- README.md | 6 +++--- pyproject.toml | 2 +- requirements-dev.lock | 16 ++++++++-------- requirements.lock | 16 ++++++++-------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.stats.yml b/.stats.yml index c2a4edd2..d3212787 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 47020371e6a9b81bd2a937e1c587f774 +config_hash: b2605c7778e32305d2a6ce66bb4aa830 diff --git a/README.md b/README.md index 0cfdfb76..df2a09de 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Gradient AI Python API library -[![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg?label=pypi%20(stable))](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) +[![PyPI version](https://img.shields.io/pypi/v/do_gradientai.svg?label=pypi%20(stable))](https://pypi.org/project/do_gradientai/) The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. 
The library includes type definitions for all request params and response fields, @@ -17,7 +17,7 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ```sh # install from PyPI -pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python +pip install --pre do_gradientai ``` ## Usage @@ -89,7 +89,7 @@ You can enable this by installing `aiohttp`: ```sh # install from PyPI -pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python[aiohttp] +pip install --pre do_gradientai[aiohttp] ``` Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: diff --git a/pyproject.toml b/pyproject.toml index a27f98cb..3e8561eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" +name = "do_gradientai" version = "0.1.0-alpha.17" description = "The official Python library for the GradientAI API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index 94875b2e..f839fd0e 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,14 +13,14 @@ aiohappyeyeballs==2.6.1 # via aiohttp aiohttp==3.12.8 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx-aiohttp aiosignal==1.3.2 # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx argcomplete==3.1.2 # via nox @@ -37,7 +37,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai exceptiongroup==1.2.2 # via anyio # via pytest @@ -53,11 +53,11 @@ h11==0.16.0 httpcore==1.0.9 # via httpx httpx==0.28.1 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx-aiohttp # via respx httpx-aiohttp==0.1.8 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai idna==3.4 # via anyio # via httpx @@ -90,7 +90,7 @@ propcache==0.3.1 # via aiohttp # via yarl pydantic==2.10.3 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -114,14 +114,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via multidict # via mypy # via pydantic diff --git a/requirements.lock b/requirements.lock index b16bfc5e..33a3cfb2 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,14 +13,14 @@ aiohappyeyeballs==2.6.1 # via aiohttp aiohttp==3.12.8 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx-aiohttp aiosignal==1.3.2 # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx async-timeout==5.0.1 # via aiohttp @@ -30,7 +30,7 @@ certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai exceptiongroup==1.2.2 # via anyio frozenlist==1.6.2 @@ -41,10 +41,10 @@ h11==0.16.0 httpcore==1.0.9 # via httpx httpx==0.28.1 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via httpx-aiohttp httpx-aiohttp==0.1.8 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai idna==3.4 # via anyio # via 
httpx @@ -56,15 +56,15 @@ propcache==0.3.1 # via aiohttp # via yarl pydantic==2.10.3 - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai typing-extensions==4.12.2 # via anyio - # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via do-gradientai # via multidict # via pydantic # via pydantic-core From 602e033e8c017f3c9cf3177f09959586d25d5eac Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 22:05:52 +0000 Subject: [PATCH 123/200] feat(api): manual updates --- .stats.yml | 2 +- api.md | 212 ++++++++++++------------ src/do_gradientai/_client.py | 108 ++++++------ src/do_gradientai/resources/__init__.py | 24 +-- 4 files changed, 173 insertions(+), 173 deletions(-) diff --git a/.stats.yml b/.stats.yml index d3212787..0a4b2b8f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: b2605c7778e32305d2a6ce66bb4aa830 +config_hash: bf733b2049f2d40e594a316a42878458 diff --git a/api.md b/api.md index fa4e0edb..82831988 100644 --- a/api.md +++ b/api.md @@ -311,18 +311,6 @@ Methods: - client.chat.completions.create(\*\*params) -> CompletionCreateResponse -# Regions - -Types: - -```python -from do_gradientai.types import RegionListResponse -``` - -Methods: - -- client.regions.list(\*\*params) -> RegionListResponse - # KnowledgeBases Types: @@ -393,100 +381,6 @@ Methods: - client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse - client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse -# Inference - -## APIKeys - -Types: - -```python -from do_gradientai.types.inference import ( - APIModelAPIKeyInfo, - APIKeyCreateResponse, - APIKeyUpdateResponse, - APIKeyListResponse, - APIKeyDeleteResponse, - APIKeyUpdateRegenerateResponse, -) -``` - -Methods: - -- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse -- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse - -# Models - -Types: - -```python -from do_gradientai.types import ( - APIAgreement, - APIModel, - APIModelVersion, - ModelRetrieveResponse, - ModelListResponse, -) -``` - -Methods: - -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse - -## Providers - -### Anthropic - -Types: - -```python -from do_gradientai.types.models.providers import ( - AnthropicCreateResponse, - AnthropicRetrieveResponse, - AnthropicUpdateResponse, - AnthropicListResponse, - AnthropicDeleteResponse, - AnthropicListAgentsResponse, -) -``` - -Methods: - -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- 
client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse - -### OpenAI - -Types: - -```python -from do_gradientai.types.models.providers import ( - OpenAICreateResponse, - OpenAIRetrieveResponse, - OpenAIUpdateResponse, - OpenAIListResponse, - OpenAIDeleteResponse, - OpenAIRetrieveAgentsResponse, -) -``` - -Methods: - -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse - # GPUDroplets Types: @@ -875,3 +769,109 @@ Methods: - client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse - client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse - client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None + +# Inference + +## APIKeys + +Types: + +```python +from do_gradientai.types.inference import ( + APIModelAPIKeyInfo, + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, + APIKeyUpdateRegenerateResponse, +) +``` + +Methods: + +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse + +# Models + +Types: + +```python +from do_gradientai.types import ( + APIAgreement, + APIModel, + APIModelVersion, + ModelRetrieveResponse, + ModelListResponse, +) +``` + +Methods: + +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse + +## Providers + +### Anthropic + +Types: + +```python +from do_gradientai.types.models.providers import ( + AnthropicCreateResponse, + AnthropicRetrieveResponse, + AnthropicUpdateResponse, + AnthropicListResponse, + AnthropicDeleteResponse, + AnthropicListAgentsResponse, +) +``` + +Methods: + +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse + +### OpenAI + +Types: + +```python +from do_gradientai.types.models.providers import ( + OpenAICreateResponse, + OpenAIRetrieveResponse, + OpenAIUpdateResponse, + OpenAIListResponse, + OpenAIDeleteResponse, + OpenAIRetrieveAgentsResponse, +) +``` + +Methods: + +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- 
client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse + +# Regions + +Types: + +```python +from do_gradientai.types import RegionListResponse +``` + +Methods: + +- client.regions.list(\*\*params) -> RegionListResponse diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 92229a05..46398f6d 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -138,18 +138,18 @@ def chat(self) -> ChatResource: return ChatResource(self) - @cached_property - def regions(self) -> RegionsResource: - from .resources.regions import RegionsResource - - return RegionsResource(self) - @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource return KnowledgeBasesResource(self) + @cached_property + def gpu_droplets(self) -> GPUDropletsResource: + from .resources.gpu_droplets import GPUDropletsResource + + return GPUDropletsResource(self) + @cached_property def inference(self) -> InferenceResource: from .resources.inference import InferenceResource @@ -163,10 +163,10 @@ def models(self) -> ModelsResource: return ModelsResource(self) @cached_property - def gpu_droplets(self) -> GPUDropletsResource: - from .resources.gpu_droplets import GPUDropletsResource + def regions(self) -> RegionsResource: + from .resources.regions import RegionsResource - return GPUDropletsResource(self) + return RegionsResource(self) @cached_property def with_raw_response(self) -> GradientAIWithRawResponse: @@ -387,18 +387,18 @@ def chat(self) -> AsyncChatResource: return AsyncChatResource(self) - @cached_property - def regions(self) -> AsyncRegionsResource: - from .resources.regions import AsyncRegionsResource - - return AsyncRegionsResource(self) - @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import AsyncKnowledgeBasesResource return AsyncKnowledgeBasesResource(self) + @cached_property + def gpu_droplets(self) -> AsyncGPUDropletsResource: + from .resources.gpu_droplets import AsyncGPUDropletsResource + + return AsyncGPUDropletsResource(self) + @cached_property def inference(self) -> AsyncInferenceResource: from .resources.inference import AsyncInferenceResource @@ -412,10 +412,10 @@ def models(self) -> AsyncModelsResource: return AsyncModelsResource(self) @cached_property - def gpu_droplets(self) -> AsyncGPUDropletsResource: - from .resources.gpu_droplets import AsyncGPUDropletsResource + def regions(self) -> AsyncRegionsResource: + from .resources.regions import AsyncRegionsResource - return AsyncGPUDropletsResource(self) + return AsyncRegionsResource(self) @cached_property def with_raw_response(self) -> AsyncGradientAIWithRawResponse: @@ -569,18 +569,18 @@ def chat(self) -> chat.ChatResourceWithRawResponse: return ChatResourceWithRawResponse(self._client.chat) - @cached_property - def regions(self) -> regions.RegionsResourceWithRawResponse: - from .resources.regions import RegionsResourceWithRawResponse - - return RegionsResourceWithRawResponse(self._client.regions) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse return 
KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property + def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse + + return GPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + @cached_property def inference(self) -> inference.InferenceResourceWithRawResponse: from .resources.inference import InferenceResourceWithRawResponse @@ -594,10 +594,10 @@ def models(self) -> models.ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._client.models) @cached_property - def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: - from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse + def regions(self) -> regions.RegionsResourceWithRawResponse: + from .resources.regions import RegionsResourceWithRawResponse - return GPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + return RegionsResourceWithRawResponse(self._client.regions) class AsyncGradientAIWithRawResponse: @@ -618,18 +618,18 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse: return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property - def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: - from .resources.regions import AsyncRegionsResourceWithRawResponse - - return AsyncRegionsResourceWithRawResponse(self._client.regions) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property + def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse + + return AsyncGPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + @cached_property def inference(self) -> inference.AsyncInferenceResourceWithRawResponse: from .resources.inference import AsyncInferenceResourceWithRawResponse @@ -643,10 +643,10 @@ def models(self) -> models.AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._client.models) @cached_property - def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: - from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse + def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: + from .resources.regions import AsyncRegionsResourceWithRawResponse - return AsyncGPUDropletsResourceWithRawResponse(self._client.gpu_droplets) + return AsyncRegionsResourceWithRawResponse(self._client.regions) class GradientAIWithStreamedResponse: @@ -667,18 +667,18 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse: return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def regions(self) -> regions.RegionsResourceWithStreamingResponse: - from .resources.regions import RegionsResourceWithStreamingResponse - - return RegionsResourceWithStreamingResponse(self._client.regions) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import 
GPUDropletsResourceWithStreamingResponse + + return GPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + @cached_property def inference(self) -> inference.InferenceResourceWithStreamingResponse: from .resources.inference import InferenceResourceWithStreamingResponse @@ -692,10 +692,10 @@ def models(self) -> models.ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._client.models) @cached_property - def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: - from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse + def regions(self) -> regions.RegionsResourceWithStreamingResponse: + from .resources.regions import RegionsResourceWithStreamingResponse - return GPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + return RegionsResourceWithStreamingResponse(self._client.regions) class AsyncGradientAIWithStreamedResponse: @@ -716,18 +716,18 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: - from .resources.regions import AsyncRegionsResourceWithStreamingResponse - - return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse + + return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + @cached_property def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: from .resources.inference import AsyncInferenceResourceWithStreamingResponse @@ -741,10 +741,10 @@ def models(self) -> models.AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._client.models) @cached_property - def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: - from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse + def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: + from .resources.regions import AsyncRegionsResourceWithStreamingResponse - return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) + return AsyncRegionsResourceWithStreamingResponse(self._client.regions) Client = GradientAI diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index fd6da608..d17a6569 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -70,18 +70,18 @@ "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", - "RegionsResource", - "AsyncRegionsResource", - "RegionsResourceWithRawResponse", - "AsyncRegionsResourceWithRawResponse", - "RegionsResourceWithStreamingResponse", - "AsyncRegionsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", + 
"GPUDropletsResource", + "AsyncGPUDropletsResource", + "GPUDropletsResourceWithRawResponse", + "AsyncGPUDropletsResourceWithRawResponse", + "GPUDropletsResourceWithStreamingResponse", + "AsyncGPUDropletsResourceWithStreamingResponse", "InferenceResource", "AsyncInferenceResource", "InferenceResourceWithRawResponse", @@ -94,10 +94,10 @@ "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", - "GPUDropletsResource", - "AsyncGPUDropletsResource", - "GPUDropletsResourceWithRawResponse", - "AsyncGPUDropletsResourceWithRawResponse", - "GPUDropletsResourceWithStreamingResponse", - "AsyncGPUDropletsResourceWithStreamingResponse", + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", ] From d00aeea424f7c586b88ccca6bfe0571dc2369e63 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 23:06:58 +0000 Subject: [PATCH 124/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e2f2c074..3cf71e62 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.17" + ".": "0.1.0-alpha.18" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3e8561eb..6e1387d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "do_gradientai" -version = "0.1.0-alpha.17" +version = "0.1.0-alpha.18" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index b56314e9..de406b04 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "do_gradientai" -__version__ = "0.1.0-alpha.17" # x-release-please-version +__version__ = "0.1.0-alpha.18" # x-release-please-version From 8986757fbaaaef343b7a7158a483bdb64d9a1429 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 23:41:03 +0000 Subject: [PATCH 125/200] feat(api): manual updates --- .stats.yml | 2 +- api.md | 140 ++++++++++++------------ src/do_gradientai/_client.py | 72 ++++++------ src/do_gradientai/resources/__init__.py | 12 +- 4 files changed, 113 insertions(+), 113 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0a4b2b8f..bd8543f2 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: bf733b2049f2d40e594a316a42878458 +config_hash: 7bedef1b770034f1ca11addb398b0659 diff --git a/api.md b/api.md index 82831988..5d6e5491 100644 --- a/api.md +++ b/api.md @@ -311,76 +311,6 @@ Methods: - client.chat.completions.create(\*\*params) -> CompletionCreateResponse -# KnowledgeBases - -Types: - -```python -from do_gradientai.types import ( - APIKnowledgeBase, - KnowledgeBaseCreateResponse, - KnowledgeBaseRetrieveResponse, - KnowledgeBaseUpdateResponse, - KnowledgeBaseListResponse, - KnowledgeBaseDeleteResponse, -) -``` - -Methods: - -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse - -## DataSources - -Types: - -```python -from do_gradientai.types.knowledge_bases import ( - APIFileUploadDataSource, - APIKnowledgeBaseDataSource, - APISpacesDataSource, - APIWebCrawlerDataSource, - AwsDataSource, - DataSourceCreateResponse, - DataSourceListResponse, - DataSourceDeleteResponse, -) -``` - -Methods: - -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse - -## IndexingJobs - -Types: - -```python -from do_gradientai.types.knowledge_bases import ( - APIIndexedDataSource, - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # GPUDroplets Types: @@ -795,6 +725,76 @@ Methods: - client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse - client.inference.api_keys.update_regenerate(api_key_uuid) 
-> APIKeyUpdateRegenerateResponse +# KnowledgeBases + +Types: + +```python +from do_gradientai.types import ( + APIKnowledgeBase, + KnowledgeBaseCreateResponse, + KnowledgeBaseRetrieveResponse, + KnowledgeBaseUpdateResponse, + KnowledgeBaseListResponse, + KnowledgeBaseDeleteResponse, +) +``` + +Methods: + +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse + +## DataSources + +Types: + +```python +from do_gradientai.types.knowledge_bases import ( + APIFileUploadDataSource, + APIKnowledgeBaseDataSource, + APISpacesDataSource, + APIWebCrawlerDataSource, + AwsDataSource, + DataSourceCreateResponse, + DataSourceListResponse, + DataSourceDeleteResponse, +) +``` + +Methods: + +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse + +## IndexingJobs + +Types: + +```python +from do_gradientai.types.knowledge_bases import ( + APIIndexedDataSource, + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # Models Types: diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index 46398f6d..e715ce61 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -138,12 +138,6 @@ def chat(self) -> ChatResource: return ChatResource(self) - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - from .resources.knowledge_bases import KnowledgeBasesResource - - return KnowledgeBasesResource(self) - @cached_property def gpu_droplets(self) -> GPUDropletsResource: from .resources.gpu_droplets import GPUDropletsResource @@ -156,6 +150,12 @@ def inference(self) -> InferenceResource: return InferenceResource(self) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + from .resources.knowledge_bases import KnowledgeBasesResource + + return KnowledgeBasesResource(self) + @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -387,12 +387,6 @@ def chat(self) -> AsyncChatResource: return AsyncChatResource(self) - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - from .resources.knowledge_bases import AsyncKnowledgeBasesResource - - return AsyncKnowledgeBasesResource(self) - @cached_property def gpu_droplets(self) -> AsyncGPUDropletsResource: from .resources.gpu_droplets import AsyncGPUDropletsResource @@ -405,6 +399,12 @@ def inference(self) -> 
AsyncInferenceResource: return AsyncInferenceResource(self) + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + from .resources.knowledge_bases import AsyncKnowledgeBasesResource + + return AsyncKnowledgeBasesResource(self) + @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -569,12 +569,6 @@ def chat(self) -> chat.ChatResourceWithRawResponse: return ChatResourceWithRawResponse(self._client.chat) - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse - - return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse @@ -587,6 +581,12 @@ def inference(self) -> inference.InferenceResourceWithRawResponse: return InferenceResourceWithRawResponse(self._client.inference) + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse + + return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -618,12 +618,6 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse: return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse - - return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse @@ -636,6 +630,12 @@ def inference(self) -> inference.AsyncInferenceResourceWithRawResponse: return AsyncInferenceResourceWithRawResponse(self._client.inference) + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse + + return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -667,12 +667,6 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse: return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse - - return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse @@ -685,6 +679,12 @@ def inference(self) -> inference.InferenceResourceWithStreamingResponse: return InferenceResourceWithStreamingResponse(self._client.inference) + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases 
import KnowledgeBasesResourceWithStreamingResponse + + return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -716,12 +716,6 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse - - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse @@ -734,6 +728,12 @@ def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: return AsyncInferenceResourceWithStreamingResponse(self._client.inference) + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse + + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index d17a6569..45abd6a3 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -70,12 +70,6 @@ "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", "GPUDropletsResource", "AsyncGPUDropletsResource", "GPUDropletsResourceWithRawResponse", @@ -88,6 +82,12 @@ "AsyncInferenceResourceWithRawResponse", "InferenceResourceWithStreamingResponse", "AsyncInferenceResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", From 41e87a8acb3c91b75821aca80ed663c68d0532fe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 23:51:20 +0000 Subject: [PATCH 126/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3cf71e62..b386befd 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.18" + ".": "0.1.0-alpha.19" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 6e1387d7..a28ca97c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = 
"do_gradientai" -version = "0.1.0-alpha.18" +version = "0.1.0-alpha.19" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index de406b04..e13138f7 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.18" # x-release-please-version +__version__ = "0.1.0-alpha.19" # x-release-please-version From d4f279269d66f5d553075c95192772dac0591dfe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 20 Jul 2025 01:10:13 +0000 Subject: [PATCH 127/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index bd8543f2..f1abe106 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 7bedef1b770034f1ca11addb398b0659 +config_hash: 758b2407df0419282abc3748d3678040 From 7e52c5f81c81e5d1d2625f77cd96901459e405c4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 20 Jul 2025 01:11:15 +0000 Subject: [PATCH 128/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index f1abe106..c8595dea 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 758b2407df0419282abc3748d3678040 +config_hash: 59409d795ace9604f1fb2062db23dd72 From a2f5fb9f35ed593d4c289bbf1e1afd884062f588 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 20 Jul 2025 01:14:34 +0000 Subject: [PATCH 129/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index c8595dea..718d3432 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 59409d795ace9604f1fb2062db23dd72 +config_hash: 5cf9c7359c13307780aa25d0203b0b35 From a7ee148ac1441725cc7d425cfc49f7f85097f38b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 10:07:11 +0000 Subject: [PATCH 130/200] feat(api): manual updates --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 48 +- api.md | 418 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{do_gradientai => gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{do_gradientai => gradientai}/_client.py | 0 src/{do_gradientai => gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py 
| 0 src/{do_gradientai => gradientai}/_files.py | 0 src/{do_gradientai => gradientai}/_models.py | 0 src/{do_gradientai => gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{do_gradientai => gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{do_gradientai => gradientai}/_version.py | 2 +- src/{do_gradientai => gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/chat/__init__.py | 0 .../resources/agents/chat/chat.py | 0 .../resources/agents/chat/completions.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../agents/evaluation_metrics/models.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 .../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/gpu_droplets/__init__.py | 0 .../gpu_droplets/account/__init__.py | 0 .../resources/gpu_droplets/account/account.py | 0 .../resources/gpu_droplets/account/keys.py | 0 .../resources/gpu_droplets/actions.py | 0 .../resources/gpu_droplets/autoscale.py | 0 .../resources/gpu_droplets/backups.py | 0 .../destroy_with_associated_resources.py | 0 .../gpu_droplets/firewalls/__init__.py | 0 .../gpu_droplets/firewalls/droplets.py | 0 .../gpu_droplets/firewalls/firewalls.py | 0 .../resources/gpu_droplets/firewalls/rules.py | 0 .../resources/gpu_droplets/firewalls/tags.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../gpu_droplets/floating_ips/actions.py | 0 .../gpu_droplets/floating_ips/floating_ips.py | 0 .../resources/gpu_droplets/gpu_droplets.py | 0 .../resources/gpu_droplets/images/__init__.py | 0 .../resources/gpu_droplets/images/actions.py | 0 .../resources/gpu_droplets/images/images.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../gpu_droplets/load_balancers/droplets.py | 0 .../load_balancers/forwarding_rules.py | 0 .../load_balancers/load_balancers.py | 0 .../resources/gpu_droplets/sizes.py | 0 .../resources/gpu_droplets/snapshots.py | 0 .../gpu_droplets/volumes/__init__.py | 0 .../resources/gpu_droplets/volumes/actions.py | 0 .../gpu_droplets/volumes/snapshots.py | 0 .../resources/gpu_droplets/volumes/volumes.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/models/__init__.py | 0 .../resources/models/models.py | 0 .../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 0 .../resources/models/providers/openai.py | 0 .../resources/models/providers/providers.py | 0 .../resources/regions.py | 0 .../types/__init__.py | 
0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 ...evaluation_metric_list_regions_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 
.../types/agents/route_view_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/gpu_droplet_create_params.py | 0 .../types/gpu_droplet_create_response.py | 0 .../types/gpu_droplet_delete_by_tag_params.py | 0 .../gpu_droplet_list_firewalls_params.py | 0 .../gpu_droplet_list_firewalls_response.py | 0 .../types/gpu_droplet_list_kernels_params.py | 0 .../gpu_droplet_list_kernels_response.py | 0 .../gpu_droplet_list_neighbors_response.py | 0 .../types/gpu_droplet_list_params.py | 0 .../types/gpu_droplet_list_response.py | 0 .../gpu_droplet_list_snapshots_params.py | 0 .../gpu_droplet_list_snapshots_response.py | 0 .../types/gpu_droplet_retrieve_response.py | 0 .../types/gpu_droplets/__init__.py | 0 .../types/gpu_droplets/account/__init__.py | 0 .../gpu_droplets/account/key_create_params.py | 0 .../account/key_create_response.py | 0 .../gpu_droplets/account/key_list_params.py | 0 .../gpu_droplets/account/key_list_response.py | 0 .../account/key_retrieve_response.py | 0 .../gpu_droplets/account/key_update_params.py | 0 .../account/key_update_response.py | 0 .../action_bulk_initiate_params.py | 0 .../action_bulk_initiate_response.py | 0 .../gpu_droplets/action_initiate_params.py | 0 .../gpu_droplets/action_initiate_response.py | 0 .../types/gpu_droplets/action_list_params.py | 0 .../gpu_droplets/action_list_response.py | 0 .../gpu_droplets/action_retrieve_response.py | 0 .../types/gpu_droplets/associated_resource.py | 0 .../gpu_droplets/autoscale_create_params.py | 0 .../gpu_droplets/autoscale_create_response.py | 0 .../autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../gpu_droplets/autoscale_list_params.py | 0 .../gpu_droplets/autoscale_list_response.py | 0 .../types/gpu_droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../autoscale_retrieve_response.py | 0 .../gpu_droplets/autoscale_update_params.py | 0 .../gpu_droplets/autoscale_update_response.py | 0 .../types/gpu_droplets/backup_list_params.py | 0 .../backup_list_policies_params.py | 0 .../backup_list_policies_response.py | 0 .../gpu_droplets/backup_list_response.py | 0 ...backup_list_supported_policies_response.py | 0 .../backup_retrieve_policy_response.py | 0 .../types/gpu_droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../destroyed_associated_resource.py | 0 
.../types/gpu_droplets/domains.py | 0 .../types/gpu_droplets/domains_param.py | 0 .../types/gpu_droplets/firewall.py | 0 .../gpu_droplets/firewall_create_params.py | 0 .../gpu_droplets/firewall_create_response.py | 0 .../gpu_droplets/firewall_list_params.py | 0 .../gpu_droplets/firewall_list_response.py | 0 .../types/gpu_droplets/firewall_param.py | 0 .../firewall_retrieve_response.py | 0 .../gpu_droplets/firewall_update_params.py | 0 .../gpu_droplets/firewall_update_response.py | 0 .../types/gpu_droplets/firewalls/__init__.py | 0 .../firewalls/droplet_add_params.py | 0 .../firewalls/droplet_remove_params.py | 0 .../gpu_droplets/firewalls/rule_add_params.py | 0 .../firewalls/rule_remove_params.py | 0 .../gpu_droplets/firewalls/tag_add_params.py | 0 .../firewalls/tag_remove_params.py | 0 .../types/gpu_droplets/floating_ip.py | 0 .../gpu_droplets/floating_ip_create_params.py | 0 .../floating_ip_create_response.py | 0 .../gpu_droplets/floating_ip_list_params.py | 0 .../gpu_droplets/floating_ip_list_response.py | 0 .../floating_ip_retrieve_response.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/gpu_droplets/forwarding_rule.py | 0 .../gpu_droplets/forwarding_rule_param.py | 0 .../types/gpu_droplets/glb_settings.py | 0 .../types/gpu_droplets/glb_settings_param.py | 0 .../types/gpu_droplets/health_check.py | 0 .../types/gpu_droplets/health_check_param.py | 0 .../types/gpu_droplets/image_create_params.py | 0 .../gpu_droplets/image_create_response.py | 0 .../types/gpu_droplets/image_list_params.py | 0 .../types/gpu_droplets/image_list_response.py | 0 .../gpu_droplets/image_retrieve_response.py | 0 .../types/gpu_droplets/image_update_params.py | 0 .../gpu_droplets/image_update_response.py | 0 .../types/gpu_droplets/images/__init__.py | 0 .../images/action_create_params.py | 0 .../images/action_list_response.py | 0 .../types/gpu_droplets/lb_firewall.py | 0 .../types/gpu_droplets/lb_firewall_param.py | 0 .../types/gpu_droplets/load_balancer.py | 0 .../load_balancer_create_params.py | 0 .../load_balancer_create_response.py | 0 .../gpu_droplets/load_balancer_list_params.py | 0 .../load_balancer_list_response.py | 0 .../load_balancer_retrieve_response.py | 0 .../load_balancer_update_params.py | 0 .../load_balancer_update_response.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/gpu_droplets/size_list_params.py | 0 .../types/gpu_droplets/size_list_response.py | 0 .../gpu_droplets/snapshot_list_params.py | 0 .../gpu_droplets/snapshot_list_response.py | 0 .../snapshot_retrieve_response.py | 0 .../types/gpu_droplets/sticky_sessions.py | 0 .../gpu_droplets/sticky_sessions_param.py | 0 .../gpu_droplets/volume_create_params.py | 0 .../gpu_droplets/volume_create_response.py | 0 .../volume_delete_by_name_params.py | 0 .../types/gpu_droplets/volume_list_params.py | 0 .../gpu_droplets/volume_list_response.py | 0 .../gpu_droplets/volume_retrieve_response.py | 0 .../types/gpu_droplets/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../volumes/action_list_params.py | 0 
.../volumes/action_list_response.py | 0 .../volumes/action_retrieve_params.py | 0 .../volumes/action_retrieve_response.py | 0 .../volumes/snapshot_create_params.py | 0 .../volumes/snapshot_create_response.py | 0 .../volumes/snapshot_list_params.py | 0 .../volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../gpu_droplets/volumes/volume_action.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model_list_response.py | 0 .../types/model_retrieve_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 .../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 .../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 .../providers/openai_retrieve_response.py | 0 .../models/providers/openai_update_params.py | 0 .../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 
.../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../agents/chat/test_completions.py | 4 +- .../agents/evaluation_metrics/test_models.py | 4 +- .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- .../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../gpu_droplets/account/test_keys.py | 4 +- .../gpu_droplets/firewalls/test_droplets.py | 2 +- .../gpu_droplets/firewalls/test_rules.py | 2 +- .../gpu_droplets/firewalls/test_tags.py | 2 +- .../gpu_droplets/floating_ips/test_actions.py | 4 +- .../gpu_droplets/images/test_actions.py | 6 +- .../load_balancers/test_droplets.py | 2 +- .../load_balancers/test_forwarding_rules.py | 2 +- .../gpu_droplets/test_actions.py | 4 +- .../gpu_droplets/test_autoscale.py | 4 +- .../gpu_droplets/test_backups.py | 4 +- .../test_destroy_with_associated_resources.py | 4 +- .../gpu_droplets/test_firewalls.py | 4 +- .../gpu_droplets/test_floating_ips.py | 4 +- .../api_resources/gpu_droplets/test_images.py | 4 +- .../gpu_droplets/test_load_balancers.py | 4 +- .../api_resources/gpu_droplets/test_sizes.py | 4 +- .../gpu_droplets/test_snapshots.py | 4 +- .../gpu_droplets/test_volumes.py | 4 +- .../gpu_droplets/volumes/test_actions.py | 4 +- .../gpu_droplets/volumes/test_snapshots.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../knowledge_bases/test_indexing_jobs.py | 4 +- .../models/providers/test_anthropic.py | 4 +- .../models/providers/test_openai.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_gpu_droplets.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 48 +- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 506 files changed, 398 insertions(+), 398 deletions(-) rename 
src/{do_gradientai => gradientai}/__init__.py (95%) rename src/{do_gradientai => gradientai}/_base_client.py (99%) rename src/{do_gradientai => gradientai}/_client.py (100%) rename src/{do_gradientai => gradientai}/_compat.py (100%) rename src/{do_gradientai => gradientai}/_constants.py (100%) rename src/{do_gradientai => gradientai}/_exceptions.py (100%) rename src/{do_gradientai => gradientai}/_files.py (100%) rename src/{do_gradientai => gradientai}/_models.py (100%) rename src/{do_gradientai => gradientai}/_qs.py (100%) rename src/{do_gradientai => gradientai}/_resource.py (100%) rename src/{do_gradientai => gradientai}/_response.py (99%) rename src/{do_gradientai => gradientai}/_streaming.py (100%) rename src/{do_gradientai => gradientai}/_types.py (99%) rename src/{do_gradientai => gradientai}/_utils/__init__.py (100%) rename src/{do_gradientai => gradientai}/_utils/_logs.py (75%) rename src/{do_gradientai => gradientai}/_utils/_proxy.py (100%) rename src/{do_gradientai => gradientai}/_utils/_reflection.py (100%) rename src/{do_gradientai => gradientai}/_utils/_resources_proxy.py (50%) rename src/{do_gradientai => gradientai}/_utils/_streams.py (100%) rename src/{do_gradientai => gradientai}/_utils/_sync.py (100%) rename src/{do_gradientai => gradientai}/_utils/_transform.py (100%) rename src/{do_gradientai => gradientai}/_utils/_typing.py (100%) rename src/{do_gradientai => gradientai}/_utils/_utils.py (100%) rename src/{do_gradientai => gradientai}/_version.py (83%) rename src/{do_gradientai => gradientai}/py.typed (100%) rename src/{do_gradientai => gradientai}/resources/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/agents.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/api_keys.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/chat/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/chat/chat.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/chat/completions.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_datasets.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/models.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_runs.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_test_cases.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/functions.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/routes.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/versions.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/chat.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/completions.py (100%) rename src/{do_gradientai => 
gradientai}/resources/gpu_droplets/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/account/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/account/account.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/account/keys.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/autoscale.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/backups.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/destroy_with_associated_resources.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/firewalls/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/firewalls/droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/firewalls/firewalls.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/firewalls/rules.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/firewalls/tags.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/floating_ips/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/floating_ips/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/floating_ips/floating_ips.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/gpu_droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/images/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/images/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/images/images.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/load_balancers/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/load_balancers/droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/load_balancers/forwarding_rules.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/load_balancers/load_balancers.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/sizes.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/snapshots.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/volumes/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/volumes/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/volumes/snapshots.py (100%) rename src/{do_gradientai => gradientai}/resources/gpu_droplets/volumes/volumes.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/api_keys.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/inference.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/indexing_jobs.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{do_gradientai => gradientai}/resources/models/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/models/models.py (100%) rename src/{do_gradientai => gradientai}/resources/models/providers/__init__.py (100%) 
 rename src/{do_gradientai => gradientai}/resources/models/providers/anthropic.py (100%)
 rename src/{do_gradientai => gradientai}/resources/models/providers/openai.py (100%)
 rename src/{do_gradientai => gradientai}/resources/models/providers/providers.py (100%)
 rename src/{do_gradientai => gradientai}/resources/regions.py (100%)
 rename src/{do_gradientai => gradientai}/types/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_status_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agent_update_status_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric_result.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_prompt.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_run.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_test_case.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_regenerate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_link_knowledge_base_output.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_star_metric.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/api_star_metric_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/chat/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/chat/completion_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/chat/completion_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_regions_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_regions_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/model_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/model_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_list_results_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_list_results_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/function_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/knowledge_base_detach_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_add_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/route_view_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/agents/version_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agent_model.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_agreement.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_anthropic_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_deployment_visibility.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_knowledge_base.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_model.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_model_version.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_openai_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_retrieval_method.py (100%)
 rename src/{do_gradientai => gradientai}/types/api_workspace.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/completion_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/chat/completion_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/droplet_backup_policy.py (100%)
 rename src/{do_gradientai => gradientai}/types/droplet_backup_policy_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_delete_by_tag_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_firewalls_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_firewalls_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_kernels_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_kernels_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_neighbors_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_snapshots_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_list_snapshots_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplet_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/account/key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_bulk_initiate_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_bulk_initiate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_initiate_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_initiate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/action_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/associated_resource.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_history_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_history_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_members_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_members_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_droplet_template.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_droplet_template_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_static_config.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_pool_static_config_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/autoscale_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_list_policies_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_list_policies_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_list_supported_policies_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/backup_retrieve_policy_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/current_utilization.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/destroy_with_associated_resource_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/destroyed_associated_resource.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/domains.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/domains_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewall_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/droplet_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/droplet_remove_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/rule_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/rule_remove_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/tag_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/firewalls/tag_remove_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ip_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ips/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ips/action_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ips/action_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ips/action_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/floating_ips/action_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/forwarding_rule.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/forwarding_rule_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/glb_settings.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/glb_settings_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/health_check.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/health_check_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/image_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/images/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/images/action_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/images/action_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/lb_firewall.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/lb_firewall_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancer_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancers/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancers/droplet_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancers/droplet_remove_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/size_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/size_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/snapshot_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/snapshot_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/snapshot_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/sticky_sessions.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/sticky_sessions_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_delete_by_name_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volume_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_retrieve_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/action_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/snapshot_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/snapshot_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/snapshot_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/snapshot_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/snapshot_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/gpu_droplets/volumes/volume_action.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_regenerate_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_key_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/inference/api_model_api_key_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_base_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexing_job.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/aws_data_source_param.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/model_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_agents_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_agents_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_create_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_create_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_delete_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_agents_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_agents_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_update_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/models/providers/openai_update_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/region_list_params.py (100%)
 rename src/{do_gradientai => gradientai}/types/region_list_response.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/action.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/action_link.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/api_links.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/api_meta.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/backward_links.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/chat_completion_chunk.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/chat_completion_token_logprob.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/completion_usage.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/disk_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/droplet.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/droplet_next_backup_window.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/firewall_rule_target.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/forward_links.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/garbage_collection.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/gpu_info.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/image.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/kernel.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/meta_properties.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/network_v4.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/network_v6.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/page_links.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/region.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/size.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/snapshots.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/subscription.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/subscription_tier_base.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared/vpc_peering.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared_params/__init__.py (100%)
 rename src/{do_gradientai => gradientai}/types/shared_params/firewall_rule_target.py (100%)
diff --git a/.stats.yml b/.stats.yml
index 718d3432..d7c07274 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 168
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml
 openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d
-config_hash: 5cf9c7359c13307780aa25d0203b0b35
+config_hash: 732232c90ba4600bc44b6a96e14beb96
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4f59c83a..086907ef 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock

 Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
 result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/gradientai/lib/` and `examples/` directories.

 ## Adding and running examples
diff --git a/README.md b/README.md
index df2a09de..a009a7cb 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ The full API of this library can be found in [api.md](api.md).

 ```python
 import os
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 client = GradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -56,7 +56,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
 ```python
 import os
 import asyncio
-from do_gradientai import AsyncGradientAI
+from gradientai import AsyncGradientAI

 client = AsyncGradientAI(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -96,8 +96,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH

 ```python
 import asyncio
-from do_gradientai import DefaultAioHttpClient
-from do_gradientai import AsyncGradientAI
+from gradientai import DefaultAioHttpClient
+from gradientai import AsyncGradientAI


 async def main() -> None:
@@ -125,7 +125,7 @@ asyncio.run(main())
 We provide support for streaming responses using Server Side Events (SSE).

 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 client = GradientAI()

@@ -146,7 +146,7 @@ for completion in stream:

 The async client uses the exact same interface.
 ```python
-from do_gradientai import AsyncGradientAI
+from gradientai import AsyncGradientAI

 client = AsyncGradientAI()

@@ -178,7 +178,7 @@ Typed requests and responses provide autocomplete and documentation within your
 Nested parameters are dictionaries, typed using `TypedDict`, for example:

 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 client = GradientAI()

@@ -197,16 +197,16 @@ print(completion.stream_options)

 ## Handling errors

-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.

 When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.

-All errors inherit from `do_gradientai.APIError`.
+All errors inherit from `gradientai.APIError`.

 ```python
-import do_gradientai
-from do_gradientai import GradientAI
+import gradientai
+from gradientai import GradientAI

 client = GradientAI()

@@ -220,12 +220,12 @@ try:
         ],
         model="llama3.3-70b-instruct",
     )
-except do_gradientai.APIConnectionError as e:
+except gradientai.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except do_gradientai.RateLimitError as e:
+except gradientai.RateLimitError as e:
     print("A 429 status code was received; we should back off a bit.")
-except do_gradientai.APIStatusError as e:
+except gradientai.APIStatusError as e:
     print("Another non-200-range status code was received")
     print(e.status_code)
     print(e.response)
@@ -253,7 +253,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
 You can use the `max_retries` option to configure or disable retry settings:

 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 # Configure the default for all requests:
 client = GradientAI(
@@ -279,7 +279,7 @@ By default requests time out after 1 minute. You can configure this with a `time
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:

 ```python
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 # Configure the default for all requests:
 client = GradientAI(
@@ -339,7 +339,7 @@ if response.my_field is None:
 The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,

 ```py
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 client = GradientAI()
 response = client.chat.completions.with_raw_response.create(
@@ -355,9 +355,9 @@ completion = response.parse()  # get the object that `chat.completions.create()`
 print(completion.choices)
 ```

-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.

-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.

 #### `.with_streaming_response`

@@ -427,7 +427,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c

 ```python
 import httpx
-from do_gradientai import GradientAI, DefaultHttpxClient
+from gradientai import GradientAI, DefaultHttpxClient

 client = GradientAI(
     # Or use the `GRADIENT_AI_BASE_URL` env var
@@ -450,7 +450,7 @@ client.with_options(http_client=DefaultHttpxClient(...))
 By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.

 ```py
-from do_gradientai import GradientAI
+from gradientai import GradientAI

 with GradientAI() as client:
   # make requests here
@@ -478,8 +478,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
 You can determine the version that is being used at runtime with:

 ```py
-import do_gradientai
-print(do_gradientai.__version__)
+import gradientai
+print(gradientai.__version__)
 ```

 ## Requirements
diff --git a/api.md b/api.md
index 5d6e5491..0f4770e9 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
 # Shared Types

 ```python
-from do_gradientai.types import (
+from gradientai.types import (
     Action,
     ActionLink,
     APILinks,
@@ -37,7 +37,7 @@ from do_gradientai.types import (
 Types:

 ```python
-from do_gradientai.types import (
+from gradientai.types import (
     APIAgent,
     APIAgentAPIKeyInfo,
     APIAgentModel,
@@ -57,19 +57,19 @@ from do_gradientai.types import (

 Methods:

-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse

 ## APIKeys

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
     APIKeyListResponse,
@@ -80,11 +80,11 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse

 ## Chat

@@ -93,19 +93,19 @@ Methods:
 Types:

 ```python
-from do_gradientai.types.agents.chat import CompletionCreateResponse
+from gradientai.types.agents.chat import CompletionCreateResponse
 ```

 Methods:

-- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse

 ## EvaluationMetrics

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationMetricListResponse,
     EvaluationMetricListRegionsResponse,
 )
@@ -113,15 +113,15 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
-- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse

 ### Workspaces

 Types:

 ```python
-from do_gradientai.types.agents.evaluation_metrics import (
+from gradientai.types.agents.evaluation_metrics import (
     WorkspaceCreateResponse,
     WorkspaceRetrieveResponse,
     WorkspaceUpdateResponse,
@@ -133,19 +133,19 @@ from do_gradientai.types.agents.evaluation_metrics import (

 Methods:

-- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
-- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
-- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
-- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
-- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
-- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse

 #### Agents

 Types:

 ```python
-from do_gradientai.types.agents.evaluation_metrics.workspaces import (
+from gradientai.types.agents.evaluation_metrics.workspaces import (
     AgentListResponse,
     AgentMoveResponse,
 )
@@ -153,27 +153,27 @@ from do_gradientai.types.agents.evaluation_metrics.workspaces import (

 Methods:

-- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
-- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse

 ### Models

 Types:

 ```python
-from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
+from gradientai.types.agents.evaluation_metrics import ModelListResponse
 ```

 Methods:

-- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
+- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse

 ## EvaluationRuns

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIEvaluationMetric,
     APIEvaluationMetricResult,
     APIEvaluationPrompt,
@@ -187,17 +187,17 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
-- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
-- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse

 ## EvaluationTestCases

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIEvaluationTestCase,
     APIStarMetric,
     EvaluationTestCaseCreateResponse,
@@ -210,18 +210,18 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
-- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse

 ## EvaluationDatasets

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
@@ -229,15 +229,15 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse

 ## Functions

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionUpdateResponse,
     FunctionDeleteResponse,
@@ -246,43 +246,43 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse

 ## Versions

 Types:

 ```python
-from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse
+from gradientai.types.agents import VersionUpdateResponse, VersionListResponse
 ```

 Methods:

-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse

 ## KnowledgeBases

 Types:

 ```python
-from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 ```

 Methods:

-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse

 ## Routes

 Types:

 ```python
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     RouteUpdateResponse,
     RouteDeleteResponse,
     RouteAddResponse,
@@ -292,10 +292,10 @@ from do_gradientai.types.agents import (

 Methods:

-- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
-- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
-- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
-- client.agents.routes.view(uuid) -> RouteViewResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse

 # Chat

@@ -304,19 +304,19 @@ Methods:
 Types:

 ```python
-from do_gradientai.types.chat import CompletionCreateResponse
+from gradientai.types.chat import CompletionCreateResponse
 ```

 Methods:

-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse

 # GPUDroplets

 Types:

 ```python
-from do_gradientai.types import (
+from gradientai.types import (
     DropletBackupPolicy,
     GPUDropletCreateResponse,
     GPUDropletRetrieveResponse,
@@ -330,22 +330,22 @@ from do_gradientai.types import (

 Methods:

-- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
-- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
-- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
-- client.gpu_droplets.delete(droplet_id) -> None
-- client.gpu_droplets.delete_by_tag(\*\*params) -> None
-- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
-- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
-- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
-- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
+- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
+- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
+- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
+- client.gpu_droplets.delete(droplet_id) -> None
+- client.gpu_droplets.delete_by_tag(\*\*params) -> None
+- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
+- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
+- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
+- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse

 ## Backups

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     BackupListResponse,
     BackupListPoliciesResponse,
     BackupListSupportedPoliciesResponse,
@@ -355,17 +355,17 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
-- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
-- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
-- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
+- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
+- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
+- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
+- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse

 ## Actions

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     ActionRetrieveResponse,
     ActionListResponse,
     ActionBulkInitiateResponse,
@@ -375,17 +375,17 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
-- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
-- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
-- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
+- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
+- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
+- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse

 ## DestroyWithAssociatedResources

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     AssociatedResource,
     DestroyedAssociatedResource,
     DestroyWithAssociatedResourceListResponse,
@@ -395,18 +395,18 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
-- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
-- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
-- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
-- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
+- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
+- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
+- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None

 ## Autoscale

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     AutoscalePool,
     AutoscalePoolDropletTemplate,
     AutoscalePoolDynamicConfig,
@@ -423,21 +423,21 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
-- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
-- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
-- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
-- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
-- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
+- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
+- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
+- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
+- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
+- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
+- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse

 ## Firewalls

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     Firewall,
     FirewallCreateResponse,
     FirewallRetrieveResponse,
@@ -448,39 +448,39 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
-- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
-- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
-- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
-- client.gpu_droplets.firewalls.delete(firewall_id) -> None
+- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
+- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
+- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
+- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
+- client.gpu_droplets.firewalls.delete(firewall_id) -> None

 ### Droplets

 Methods:

-- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None

 ### Tags

 Methods:

-- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None

 ### Rules

 Methods:

-- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None

 ## FloatingIPs

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     FloatingIP,
     FloatingIPCreateResponse,
     FloatingIPRetrieveResponse,
@@ -490,17 +490,17 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
-- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
-- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
-- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
+- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
+- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
+- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
+- client.gpu_droplets.floating_ips.delete(floating_ip) -> None

 ### Actions

 Types:

 ```python
-from do_gradientai.types.gpu_droplets.floating_ips import (
+from gradientai.types.gpu_droplets.floating_ips import (
     ActionCreateResponse,
     ActionRetrieveResponse,
     ActionListResponse,
@@ -509,16 +509,16 @@ from do_gradientai.types.gpu_droplets.floating_ips import (

 Methods:

-- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
-- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
-- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
+- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
+- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
+- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse

 ## Images

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     ImageCreateResponse,
     ImageRetrieveResponse,
     ImageUpdateResponse,
@@ -528,32 +528,32 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
-- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
-- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
-- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
-- client.gpu_droplets.images.delete(image_id) -> None
+- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
+- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
+- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
+- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
+- client.gpu_droplets.images.delete(image_id) -> None

 ### Actions

 Types:

 ```python
-from do_gradientai.types.gpu_droplets.images import ActionListResponse
+from gradientai.types.gpu_droplets.images import ActionListResponse
 ```

 Methods:

-- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
-- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
-- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
+- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
+- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
+- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse

 ## LoadBalancers

 Types:

 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     Domains,
     ForwardingRule,
     GlbSettings,
@@ -570,59 +570,59 @@ from do_gradientai.types.gpu_droplets import (

 Methods:

-- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
-- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
-- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
-- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
-- client.gpu_droplets.load_balancers.delete(lb_id) -> None
-- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
+- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
+- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
+- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
+- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
+- client.gpu_droplets.load_balancers.delete(lb_id) -> None
+- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None

 ### Droplets

 Methods:

-- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None

 ### ForwardingRules

 Methods:

-- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
+- 
client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None ## Sizes Types: ```python -from do_gradientai.types.gpu_droplets import SizeListResponse +from gradientai.types.gpu_droplets import SizeListResponse ``` Methods: -- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse +- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse ## Snapshots Types: ```python -from do_gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse +from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse ``` Methods: -- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse -- client.gpu_droplets.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse +- client.gpu_droplets.snapshots.delete(snapshot_id) -> None ## Volumes Types: ```python -from do_gradientai.types.gpu_droplets import ( +from gradientai.types.gpu_droplets import ( VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse, @@ -631,18 +631,18 @@ from do_gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse -- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse -- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse -- client.gpu_droplets.volumes.delete(volume_id) -> None -- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None +- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse +- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse +- client.gpu_droplets.volumes.delete(volume_id) -> None +- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None ### Actions Types: ```python -from do_gradientai.types.gpu_droplets.volumes import ( +from gradientai.types.gpu_droplets.volumes import ( VolumeAction, ActionRetrieveResponse, ActionListResponse, @@ -653,17 +653,17 @@ from do_gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse -- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse -- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse -- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse +- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse ### Snapshots Types: ```python -from do_gradientai.types.gpu_droplets.volumes import ( +from gradientai.types.gpu_droplets.volumes import ( SnapshotCreateResponse, SnapshotRetrieveResponse, SnapshotListResponse, @@ -672,10 +672,10 @@ from do_gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse -- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> 
SnapshotRetrieveResponse -- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse -- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None ## Account @@ -684,7 +684,7 @@ Methods: Types: ```python -from do_gradientai.types.gpu_droplets.account import ( +from gradientai.types.gpu_droplets.account import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -694,11 +694,11 @@ from do_gradientai.types.gpu_droplets.account import ( Methods: -- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse -- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse -- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse -- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse -- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None +- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse +- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse +- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None # Inference @@ -707,7 +707,7 @@ Methods: Types: ```python -from do_gradientai.types.inference import ( +from gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -719,18 +719,18 @@ from do_gradientai.types.inference import ( Methods: -- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse -- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # KnowledgeBases Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -742,18 +742,18 @@ from do_gradientai.types import ( Methods: -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, 
\*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -767,16 +767,16 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse ## IndexingJobs Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, @@ -789,18 +789,18 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Models Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( APIAgreement, APIModel, APIModelVersion, @@ -811,8 +811,8 @@ from do_gradientai.types import ( Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -821,7 +821,7 @@ Methods: Types: ```python -from do_gradientai.types.models.providers import ( +from gradientai.types.models.providers import ( AnthropicCreateResponse, AnthropicRetrieveResponse, AnthropicUpdateResponse, @@ -833,19 +833,19 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> 
AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from do_gradientai.types.models.providers import ( +from gradientai.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -857,21 +857,21 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # Regions Types: ```python -from do_gradientai.types import RegionListResponse +from gradientai.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse diff --git a/mypy.ini b/mypy.ini index 82b0c891..748d8234 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index a28ca97c..066f5a40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import do_gradientai'" +"check:importable" = "python -c 'import gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
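The api.md surface above, together with the `check:importable` task it renames, suggests a quick smoke test for the renamed package. A minimal sketch, assuming the package exports a `GradientAI` client class taking `api_key` and `max_retries` constructor arguments (names inferred from the surrounding hunks, not confirmed by this patch):

```python
# Hypothetical smoke test for the rename; every name below except the
# `gradientai` package itself is an assumption about the SDK's surface.
import gradientai  # mirrors the `check:importable` task above

from gradientai import GradientAI  # assumed client class name

# The _base_client.py hunk below documents max_retries: pass 0 to
# disable retries, math.inf or a very high number for unlimited.
client = GradientAI(api_key="...", max_retries=0)

# One representative call from the GPUDroplets section of api.md above.
droplets = client.gpu_droplets.list()
```

If the bare import fails after this patch, the `[tool.hatch.build.targets.wheel]` change below (which must point at `src/gradientai`) is the first thing to check.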
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/do_gradientai"] +packages = ["src/gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["do_gradientai", "tests"] +known-first-party = ["gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index a320c1a8..2ff9a58c 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/do_gradientai/_version.py" + "src/gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index e46e909b..37b38f6f 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import do_gradientai' +rye run python -c 'import gradientai' diff --git a/src/do_gradientai/__init__.py b/src/gradientai/__init__.py similarity index 95% rename from src/do_gradientai/__init__.py rename to src/gradientai/__init__.py index 41b943b2..3316fe47 100644 --- a/src/do_gradientai/__init__.py +++ b/src/gradientai/__init__.py @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError +# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "do_gradientai" + __locals[__name].__module__ = "gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/do_gradientai/_base_client.py b/src/gradientai/_base_client.py similarity index 99% rename from src/do_gradientai/_base_client.py rename to src/gradientai/_base_client.py index 326c662c..379c27d1 100644 --- a/src/do_gradientai/_base_client.py +++ b/src/gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/do_gradientai/_client.py b/src/gradientai/_client.py similarity index 100% rename from src/do_gradientai/_client.py rename to src/gradientai/_client.py diff --git a/src/do_gradientai/_compat.py b/src/gradientai/_compat.py similarity index 100% rename from src/do_gradientai/_compat.py rename to src/gradientai/_compat.py diff --git a/src/do_gradientai/_constants.py b/src/gradientai/_constants.py similarity index 100% rename from src/do_gradientai/_constants.py rename to src/gradientai/_constants.py diff --git a/src/do_gradientai/_exceptions.py b/src/gradientai/_exceptions.py similarity index 100% rename from src/do_gradientai/_exceptions.py rename to src/gradientai/_exceptions.py diff --git a/src/do_gradientai/_files.py b/src/gradientai/_files.py similarity index 100% rename from src/do_gradientai/_files.py rename to src/gradientai/_files.py diff --git a/src/do_gradientai/_models.py b/src/gradientai/_models.py similarity index 100% rename from src/do_gradientai/_models.py rename to src/gradientai/_models.py diff --git a/src/do_gradientai/_qs.py b/src/gradientai/_qs.py similarity index 100% rename from src/do_gradientai/_qs.py rename to src/gradientai/_qs.py diff --git a/src/do_gradientai/_resource.py b/src/gradientai/_resource.py similarity index 100% rename from src/do_gradientai/_resource.py rename to src/gradientai/_resource.py diff --git a/src/do_gradientai/_response.py b/src/gradientai/_response.py similarity index 99% rename from src/do_gradientai/_response.py rename to src/gradientai/_response.py index 8ca43971..2037e4ca 100644 --- a/src/do_gradientai/_response.py +++ b/src/gradientai/_response.py @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -558,7 +558,7 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `do_gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `gradientai._streaming` for reference", ) diff --git a/src/do_gradientai/_streaming.py b/src/gradientai/_streaming.py similarity index 100% rename from src/do_gradientai/_streaming.py rename to src/gradientai/_streaming.py diff --git a/src/do_gradientai/_types.py b/src/gradientai/_types.py similarity index 99% rename from src/do_gradientai/_types.py rename to src/gradientai/_types.py index c356c700..1bac876d 100644 --- a/src/do_gradientai/_types.py +++ b/src/gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from do_gradientai import NoneType +# from gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/do_gradientai/_utils/__init__.py b/src/gradientai/_utils/__init__.py similarity index 100% rename from src/do_gradientai/_utils/__init__.py rename to src/gradientai/_utils/__init__.py diff --git a/src/do_gradientai/_utils/_logs.py b/src/gradientai/_utils/_logs.py similarity index 75% rename from src/do_gradientai/_utils/_logs.py rename to src/gradientai/_utils/_logs.py index ac45b1a5..9047e5c8 100644 --- a/src/do_gradientai/_utils/_logs.py +++ b/src/gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("do_gradientai") +logger: logging.Logger = logging.getLogger("gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", diff --git a/src/do_gradientai/_utils/_proxy.py b/src/gradientai/_utils/_proxy.py similarity index 100% rename from src/do_gradientai/_utils/_proxy.py rename to src/gradientai/_utils/_proxy.py diff --git a/src/do_gradientai/_utils/_reflection.py b/src/gradientai/_utils/_reflection.py similarity index 100% rename from src/do_gradientai/_utils/_reflection.py rename to src/gradientai/_utils/_reflection.py diff --git a/src/do_gradientai/_utils/_resources_proxy.py b/src/gradientai/_utils/_resources_proxy.py similarity index 50% rename from src/do_gradientai/_utils/_resources_proxy.py rename to src/gradientai/_utils/_resources_proxy.py index 03763c3b..b3bc4931 100644 --- a/src/do_gradientai/_utils/_resources_proxy.py +++ b/src/gradientai/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `do_gradientai.resources` module. + """A proxy for the `gradientai.resources` module. 
- This is used so that we can lazily import `do_gradientai.resources` only when - needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` + This is used so that we can lazily import `gradientai.resources` only when + needed *and* so that users can just import `gradientai` and reference `gradientai.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("do_gradientai.resources") + mod = importlib.import_module("gradientai.resources") return mod diff --git a/src/do_gradientai/_utils/_streams.py b/src/gradientai/_utils/_streams.py similarity index 100% rename from src/do_gradientai/_utils/_streams.py rename to src/gradientai/_utils/_streams.py diff --git a/src/do_gradientai/_utils/_sync.py b/src/gradientai/_utils/_sync.py similarity index 100% rename from src/do_gradientai/_utils/_sync.py rename to src/gradientai/_utils/_sync.py diff --git a/src/do_gradientai/_utils/_transform.py b/src/gradientai/_utils/_transform.py similarity index 100% rename from src/do_gradientai/_utils/_transform.py rename to src/gradientai/_utils/_transform.py diff --git a/src/do_gradientai/_utils/_typing.py b/src/gradientai/_utils/_typing.py similarity index 100% rename from src/do_gradientai/_utils/_typing.py rename to src/gradientai/_utils/_typing.py diff --git a/src/do_gradientai/_utils/_utils.py b/src/gradientai/_utils/_utils.py similarity index 100% rename from src/do_gradientai/_utils/_utils.py rename to src/gradientai/_utils/_utils.py diff --git a/src/do_gradientai/_version.py b/src/gradientai/_version.py similarity index 83% rename from src/do_gradientai/_version.py rename to src/gradientai/_version.py index e13138f7..9ba60879 100644 --- a/src/do_gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
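Most renames in this patch are content-identical, but the `_resources_proxy.py` hunk above is one spot where the old package name was baked into runtime behavior: the string handed to `importlib.import_module` must track the package name, and a stale value would only surface on first attribute access rather than at import time. A stripped-down sketch of that lazy-import pattern (the `LazyModule` helper below is illustrative only, not the SDK's actual `LazyProxy` base):

```python
import importlib
from types import ModuleType
from typing import Any, Optional


class LazyModule:
    """Defer importing a module until one of its attributes is first used."""

    def __init__(self, name: str) -> None:
        self._name = name  # e.g. "gradientai.resources"
        self._module: Optional[ModuleType] = None

    def __getattr__(self, attr: str) -> Any:
        # Invoked only for attributes not found normally, so the real
        # import is paid once, on first access, then cached.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


# Had the pre-rename string "do_gradientai.resources" been left in place,
# this line would still succeed; only the first `resources.<attr>` access
# would raise, which is the deferred failure mode described above.
resources = LazyModule("gradientai.resources")
```

That deferred failure mode is presumably why the rename had to patch the hard-coded module string rather than just move files.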
-__title__ = "do_gradientai" +__title__ = "gradientai" __version__ = "0.1.0-alpha.19" # x-release-please-version diff --git a/src/do_gradientai/py.typed b/src/gradientai/py.typed similarity index 100% rename from src/do_gradientai/py.typed rename to src/gradientai/py.typed diff --git a/src/do_gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py similarity index 100% rename from src/do_gradientai/resources/__init__.py rename to src/gradientai/resources/__init__.py diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/__init__.py rename to src/gradientai/resources/agents/__init__.py diff --git a/src/do_gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/agents.py rename to src/gradientai/resources/agents/agents.py diff --git a/src/do_gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py similarity index 100% rename from src/do_gradientai/resources/agents/api_keys.py rename to src/gradientai/resources/agents/api_keys.py diff --git a/src/do_gradientai/resources/agents/chat/__init__.py b/src/gradientai/resources/agents/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/__init__.py rename to src/gradientai/resources/agents/chat/__init__.py diff --git a/src/do_gradientai/resources/agents/chat/chat.py b/src/gradientai/resources/agents/chat/chat.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/chat.py rename to src/gradientai/resources/agents/chat/chat.py diff --git a/src/do_gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/completions.py rename to src/gradientai/resources/agents/chat/completions.py diff --git a/src/do_gradientai/resources/agents/evaluation_datasets.py b/src/gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_datasets.py rename to src/gradientai/resources/agents/evaluation_datasets.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/models.py b/src/gradientai/resources/agents/evaluation_metrics/models.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/models.py rename to src/gradientai/resources/agents/evaluation_metrics/models.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to 
src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_runs.py rename to src/gradientai/resources/agents/evaluation_runs.py diff --git a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_test_cases.py rename to src/gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/do_gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py similarity index 100% rename from src/do_gradientai/resources/agents/functions.py rename to src/gradientai/resources/agents/functions.py diff --git a/src/do_gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/agents/knowledge_bases.py rename to src/gradientai/resources/agents/knowledge_bases.py diff --git a/src/do_gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py similarity index 100% rename from src/do_gradientai/resources/agents/routes.py rename to src/gradientai/resources/agents/routes.py diff --git a/src/do_gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 100% rename from src/do_gradientai/resources/agents/versions.py rename to src/gradientai/resources/agents/versions.py diff --git a/src/do_gradientai/resources/chat/__init__.py b/src/gradientai/resources/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/chat/__init__.py rename to src/gradientai/resources/chat/__init__.py diff --git a/src/do_gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py similarity index 100% rename from src/do_gradientai/resources/chat/chat.py rename to src/gradientai/resources/chat/chat.py diff --git a/src/do_gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py similarity index 100% rename from src/do_gradientai/resources/chat/completions.py rename to src/gradientai/resources/chat/completions.py diff --git a/src/do_gradientai/resources/gpu_droplets/__init__.py b/src/gradientai/resources/gpu_droplets/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/__init__.py rename to src/gradientai/resources/gpu_droplets/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/account/__init__.py b/src/gradientai/resources/gpu_droplets/account/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/account/__init__.py rename to 
src/gradientai/resources/gpu_droplets/account/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/account/account.py b/src/gradientai/resources/gpu_droplets/account/account.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/account/account.py rename to src/gradientai/resources/gpu_droplets/account/account.py diff --git a/src/do_gradientai/resources/gpu_droplets/account/keys.py b/src/gradientai/resources/gpu_droplets/account/keys.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/account/keys.py rename to src/gradientai/resources/gpu_droplets/account/keys.py diff --git a/src/do_gradientai/resources/gpu_droplets/actions.py b/src/gradientai/resources/gpu_droplets/actions.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/actions.py rename to src/gradientai/resources/gpu_droplets/actions.py diff --git a/src/do_gradientai/resources/gpu_droplets/autoscale.py b/src/gradientai/resources/gpu_droplets/autoscale.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/autoscale.py rename to src/gradientai/resources/gpu_droplets/autoscale.py diff --git a/src/do_gradientai/resources/gpu_droplets/backups.py b/src/gradientai/resources/gpu_droplets/backups.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/backups.py rename to src/gradientai/resources/gpu_droplets/backups.py diff --git a/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py rename to src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/gradientai/resources/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py rename to src/gradientai/resources/gpu_droplets/firewalls/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/gradientai/resources/gpu_droplets/firewalls/droplets.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py rename to src/gradientai/resources/gpu_droplets/firewalls/droplets.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py rename to src/gradientai/resources/gpu_droplets/firewalls/firewalls.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py b/src/gradientai/resources/gpu_droplets/firewalls/rules.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/rules.py rename to src/gradientai/resources/gpu_droplets/firewalls/rules.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py b/src/gradientai/resources/gpu_droplets/firewalls/tags.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/tags.py rename to src/gradientai/resources/gpu_droplets/firewalls/tags.py diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py rename to 
src/gradientai/resources/gpu_droplets/floating_ips/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/gradientai/resources/gpu_droplets/floating_ips/actions.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py rename to src/gradientai/resources/gpu_droplets/floating_ips/actions.py diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py rename to src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py diff --git a/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py b/src/gradientai/resources/gpu_droplets/gpu_droplets.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/gpu_droplets.py rename to src/gradientai/resources/gpu_droplets/gpu_droplets.py diff --git a/src/do_gradientai/resources/gpu_droplets/images/__init__.py b/src/gradientai/resources/gpu_droplets/images/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/images/__init__.py rename to src/gradientai/resources/gpu_droplets/images/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/images/actions.py b/src/gradientai/resources/gpu_droplets/images/actions.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/images/actions.py rename to src/gradientai/resources/gpu_droplets/images/actions.py diff --git a/src/do_gradientai/resources/gpu_droplets/images/images.py b/src/gradientai/resources/gpu_droplets/images/images.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/images/images.py rename to src/gradientai/resources/gpu_droplets/images/images.py diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py rename to src/gradientai/resources/gpu_droplets/load_balancers/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py rename to src/gradientai/resources/gpu_droplets/load_balancers/droplets.py diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py rename to src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py rename to src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py diff --git a/src/do_gradientai/resources/gpu_droplets/sizes.py b/src/gradientai/resources/gpu_droplets/sizes.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/sizes.py rename to src/gradientai/resources/gpu_droplets/sizes.py diff --git a/src/do_gradientai/resources/gpu_droplets/snapshots.py 
b/src/gradientai/resources/gpu_droplets/snapshots.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/snapshots.py rename to src/gradientai/resources/gpu_droplets/snapshots.py diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/__init__.py b/src/gradientai/resources/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/volumes/__init__.py rename to src/gradientai/resources/gpu_droplets/volumes/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/actions.py b/src/gradientai/resources/gpu_droplets/volumes/actions.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/volumes/actions.py rename to src/gradientai/resources/gpu_droplets/volumes/actions.py diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/gradientai/resources/gpu_droplets/volumes/snapshots.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py rename to src/gradientai/resources/gpu_droplets/volumes/snapshots.py diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py b/src/gradientai/resources/gpu_droplets/volumes/volumes.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/volumes/volumes.py rename to src/gradientai/resources/gpu_droplets/volumes/volumes.py diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/gradientai/resources/inference/__init__.py similarity index 100% rename from src/do_gradientai/resources/inference/__init__.py rename to src/gradientai/resources/inference/__init__.py diff --git a/src/do_gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py similarity index 100% rename from src/do_gradientai/resources/inference/api_keys.py rename to src/gradientai/resources/inference/api_keys.py diff --git a/src/do_gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py similarity index 100% rename from src/do_gradientai/resources/inference/inference.py rename to src/gradientai/resources/inference/inference.py diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/__init__.py rename to src/gradientai/resources/knowledge_bases/__init__.py diff --git a/src/do_gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/data_sources.py rename to src/gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/do_gradientai/resources/models/__init__.py b/src/gradientai/resources/models/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/__init__.py rename to 
src/gradientai/resources/models/__init__.py diff --git a/src/do_gradientai/resources/models/models.py b/src/gradientai/resources/models/models.py similarity index 100% rename from src/do_gradientai/resources/models/models.py rename to src/gradientai/resources/models/models.py diff --git a/src/do_gradientai/resources/models/providers/__init__.py b/src/gradientai/resources/models/providers/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/providers/__init__.py rename to src/gradientai/resources/models/providers/__init__.py diff --git a/src/do_gradientai/resources/models/providers/anthropic.py b/src/gradientai/resources/models/providers/anthropic.py similarity index 100% rename from src/do_gradientai/resources/models/providers/anthropic.py rename to src/gradientai/resources/models/providers/anthropic.py diff --git a/src/do_gradientai/resources/models/providers/openai.py b/src/gradientai/resources/models/providers/openai.py similarity index 100% rename from src/do_gradientai/resources/models/providers/openai.py rename to src/gradientai/resources/models/providers/openai.py diff --git a/src/do_gradientai/resources/models/providers/providers.py b/src/gradientai/resources/models/providers/providers.py similarity index 100% rename from src/do_gradientai/resources/models/providers/providers.py rename to src/gradientai/resources/models/providers/providers.py diff --git a/src/do_gradientai/resources/regions.py b/src/gradientai/resources/regions.py similarity index 100% rename from src/do_gradientai/resources/regions.py rename to src/gradientai/resources/regions.py diff --git a/src/do_gradientai/types/__init__.py b/src/gradientai/types/__init__.py similarity index 100% rename from src/do_gradientai/types/__init__.py rename to src/gradientai/types/__init__.py diff --git a/src/do_gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py similarity index 100% rename from src/do_gradientai/types/agent_create_params.py rename to src/gradientai/types/agent_create_params.py diff --git a/src/do_gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py similarity index 100% rename from src/do_gradientai/types/agent_create_response.py rename to src/gradientai/types/agent_create_response.py diff --git a/src/do_gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py similarity index 100% rename from src/do_gradientai/types/agent_delete_response.py rename to src/gradientai/types/agent_delete_response.py diff --git a/src/do_gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py similarity index 100% rename from src/do_gradientai/types/agent_list_params.py rename to src/gradientai/types/agent_list_params.py diff --git a/src/do_gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py similarity index 100% rename from src/do_gradientai/types/agent_list_response.py rename to src/gradientai/types/agent_list_response.py diff --git a/src/do_gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agent_retrieve_response.py rename to src/gradientai/types/agent_retrieve_response.py diff --git a/src/do_gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py similarity index 100% rename from src/do_gradientai/types/agent_update_params.py rename to src/gradientai/types/agent_update_params.py diff --git 
a/src/do_gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_response.py
rename to src/gradientai/types/agent_update_response.py
diff --git a/src/do_gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_status_params.py
rename to src/gradientai/types/agent_update_status_params.py
diff --git a/src/do_gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_status_response.py
rename to src/gradientai/types/agent_update_status_response.py
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/__init__.py
rename to src/gradientai/types/agents/__init__.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_metric.py
rename to src/gradientai/types/agents/api_evaluation_metric.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_metric_result.py
rename to src/gradientai/types/agents/api_evaluation_metric_result.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_prompt.py
rename to src/gradientai/types/agents/api_evaluation_prompt.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py b/src/gradientai/types/agents/api_evaluation_run.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_run.py
rename to src/gradientai/types/agents/api_evaluation_run.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_test_case.py
rename to src/gradientai/types/agents/api_evaluation_test_case.py
diff --git a/src/do_gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_create_params.py
rename to src/gradientai/types/agents/api_key_create_params.py
diff --git a/src/do_gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_create_response.py
rename to src/gradientai/types/agents/api_key_create_response.py
diff --git a/src/do_gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_delete_response.py
rename to src/gradientai/types/agents/api_key_delete_response.py
diff --git a/src/do_gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_list_params.py
rename to src/gradientai/types/agents/api_key_list_params.py
diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_list_response.py
rename to src/gradientai/types/agents/api_key_list_response.py
diff --git a/src/do_gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_regenerate_response.py
rename to src/gradientai/types/agents/api_key_regenerate_response.py
diff --git a/src/do_gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_update_params.py
rename to src/gradientai/types/agents/api_key_update_params.py
diff --git a/src/do_gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_update_response.py
rename to src/gradientai/types/agents/api_key_update_response.py
diff --git a/src/do_gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_link_knowledge_base_output.py
rename to src/gradientai/types/agents/api_link_knowledge_base_output.py
diff --git a/src/do_gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_star_metric.py
rename to src/gradientai/types/agents/api_star_metric.py
diff --git a/src/do_gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_star_metric_param.py
rename to src/gradientai/types/agents/api_star_metric_param.py
diff --git a/src/do_gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/__init__.py
rename to src/gradientai/types/agents/chat/__init__.py
diff --git a/src/do_gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/completion_create_params.py
rename to src/gradientai/types/agents/chat/completion_create_params.py
diff --git a/src/do_gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/completion_create_response.py
rename to src/gradientai/types/agents/chat/completion_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_params.py b/src/gradientai/types/agents/evaluation_dataset_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_params.py
rename to src/gradientai/types/agents/evaluation_dataset_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_response.py b/src/gradientai/types/agents/evaluation_dataset_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_response.py
rename to src/gradientai/types/agents/evaluation_dataset_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py
rename to src/gradientai/types/agents/evaluation_metric_list_regions_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py
rename to src/gradientai/types/agents/evaluation_metric_list_regions_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_response.py b/src/gradientai/types/agents/evaluation_metric_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_response.py
rename to src/gradientai/types/agents/evaluation_metric_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/__init__.py
rename to src/gradientai/types/agents/evaluation_metrics/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py
rename to src/gradientai/types/agents/evaluation_metrics/model_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
rename to src/gradientai/types/agents/evaluation_metrics/model_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
rename to src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_create_params.py
rename to src/gradientai/types/agents/evaluation_run_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/gradientai/types/agents/evaluation_run_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_create_response.py
rename to src/gradientai/types/agents/evaluation_run_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradientai/types/agents/evaluation_run_list_results_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_list_results_params.py
rename to src/gradientai/types/agents/evaluation_run_list_results_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradientai/types/agents/evaluation_run_list_results_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_list_results_response.py
rename to src/gradientai/types/agents/evaluation_run_list_results_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
rename to src/gradientai/types/agents/evaluation_run_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
rename to src/gradientai/types/agents/evaluation_run_retrieve_results_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_params.py b/src/gradientai/types/agents/evaluation_test_case_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_create_params.py
rename to src/gradientai/types/agents/evaluation_test_case_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_response.py b/src/gradientai/types/agents/evaluation_test_case_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_create_response.py
rename to src/gradientai/types/agents/evaluation_test_case_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_response.py
rename to src/gradientai/types/agents/evaluation_test_case_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
rename to src/gradientai/types/agents/evaluation_test_case_retrieve_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
rename to src/gradientai/types/agents/evaluation_test_case_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_update_params.py
rename to src/gradientai/types/agents/evaluation_test_case_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_response.py b/src/gradientai/types/agents/evaluation_test_case_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_update_response.py
rename to src/gradientai/types/agents/evaluation_test_case_update_response.py
diff --git a/src/do_gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_create_params.py
rename to src/gradientai/types/agents/function_create_params.py
diff --git a/src/do_gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_create_response.py
rename to src/gradientai/types/agents/function_create_response.py
diff --git a/src/do_gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_delete_response.py
rename to src/gradientai/types/agents/function_delete_response.py
diff --git a/src/do_gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_update_params.py
rename to src/gradientai/types/agents/function_update_params.py
diff --git a/src/do_gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_update_response.py
rename to src/gradientai/types/agents/function_update_response.py
diff --git a/src/do_gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/knowledge_base_detach_response.py
rename to src/gradientai/types/agents/knowledge_base_detach_response.py
diff --git a/src/do_gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_add_params.py
rename to src/gradientai/types/agents/route_add_params.py
diff --git a/src/do_gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_add_response.py
rename to src/gradientai/types/agents/route_add_response.py
diff --git a/src/do_gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_delete_response.py
rename to src/gradientai/types/agents/route_delete_response.py
diff --git a/src/do_gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_update_params.py
rename to src/gradientai/types/agents/route_update_params.py
diff --git a/src/do_gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_update_response.py
rename to src/gradientai/types/agents/route_update_response.py
diff --git a/src/do_gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_view_response.py
rename to src/gradientai/types/agents/route_view_response.py
diff --git a/src/do_gradientai/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_list_params.py
rename to src/gradientai/types/agents/version_list_params.py
diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_list_response.py
rename to src/gradientai/types/agents/version_list_response.py
diff --git a/src/do_gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_update_params.py
rename to src/gradientai/types/agents/version_update_params.py
diff --git a/src/do_gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_update_response.py
rename to src/gradientai/types/agents/version_update_response.py
diff --git a/src/do_gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py
similarity index 100%
rename from src/do_gradientai/types/api_agent.py
rename to src/gradientai/types/api_agent.py
diff --git a/src/do_gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_agent_api_key_info.py
rename to src/gradientai/types/api_agent_api_key_info.py
diff --git a/src/do_gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py
similarity index 100%
rename from src/do_gradientai/types/api_agent_model.py
rename to src/gradientai/types/api_agent_model.py
diff --git a/src/do_gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py
similarity index 100%
rename from src/do_gradientai/types/api_agreement.py
rename to src/gradientai/types/api_agreement.py
diff --git a/src/do_gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_anthropic_api_key_info.py
rename to src/gradientai/types/api_anthropic_api_key_info.py
diff --git a/src/do_gradientai/types/api_deployment_visibility.py b/src/gradientai/types/api_deployment_visibility.py
similarity index 100%
rename from src/do_gradientai/types/api_deployment_visibility.py
rename to src/gradientai/types/api_deployment_visibility.py
diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py
similarity index 100%
rename from src/do_gradientai/types/api_knowledge_base.py
rename to src/gradientai/types/api_knowledge_base.py
diff --git a/src/do_gradientai/types/api_model.py b/src/gradientai/types/api_model.py
similarity index 100%
rename from src/do_gradientai/types/api_model.py
rename to src/gradientai/types/api_model.py
diff --git a/src/do_gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py
similarity index 100%
rename from src/do_gradientai/types/api_model_version.py
rename to src/gradientai/types/api_model_version.py
diff --git a/src/do_gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_openai_api_key_info.py
rename to src/gradientai/types/api_openai_api_key_info.py
diff --git a/src/do_gradientai/types/api_retrieval_method.py b/src/gradientai/types/api_retrieval_method.py
similarity index 100%
rename from src/do_gradientai/types/api_retrieval_method.py
rename to src/gradientai/types/api_retrieval_method.py
diff --git a/src/do_gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py
similarity index 100%
rename from src/do_gradientai/types/api_workspace.py
rename to src/gradientai/types/api_workspace.py
diff --git a/src/do_gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py
similarity index 100%
rename from src/do_gradientai/types/chat/__init__.py
rename to src/gradientai/types/chat/__init__.py
diff --git a/src/do_gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py
similarity index 100%
rename from src/do_gradientai/types/chat/completion_create_params.py
rename to src/gradientai/types/chat/completion_create_params.py
diff --git a/src/do_gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py
similarity index 100%
rename from src/do_gradientai/types/chat/completion_create_response.py
rename to src/gradientai/types/chat/completion_create_response.py
diff --git a/src/do_gradientai/types/droplet_backup_policy.py b/src/gradientai/types/droplet_backup_policy.py
similarity index 100%
rename from src/do_gradientai/types/droplet_backup_policy.py
rename to src/gradientai/types/droplet_backup_policy.py
diff --git a/src/do_gradientai/types/droplet_backup_policy_param.py b/src/gradientai/types/droplet_backup_policy_param.py
similarity index 100%
rename from src/do_gradientai/types/droplet_backup_policy_param.py
rename to src/gradientai/types/droplet_backup_policy_param.py
diff --git a/src/do_gradientai/types/gpu_droplet_create_params.py b/src/gradientai/types/gpu_droplet_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_create_params.py
rename to src/gradientai/types/gpu_droplet_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_create_response.py b/src/gradientai/types/gpu_droplet_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_create_response.py
rename to src/gradientai/types/gpu_droplet_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/gradientai/types/gpu_droplet_delete_by_tag_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py
rename to src/gradientai/types/gpu_droplet_delete_by_tag_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_firewalls_params.py b/src/gradientai/types/gpu_droplet_list_firewalls_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_firewalls_params.py
rename to src/gradientai/types/gpu_droplet_list_firewalls_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_firewalls_response.py b/src/gradientai/types/gpu_droplet_list_firewalls_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_firewalls_response.py
rename to src/gradientai/types/gpu_droplet_list_firewalls_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_kernels_params.py b/src/gradientai/types/gpu_droplet_list_kernels_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_kernels_params.py
rename to src/gradientai/types/gpu_droplet_list_kernels_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_kernels_response.py b/src/gradientai/types/gpu_droplet_list_kernels_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_kernels_response.py
rename to src/gradientai/types/gpu_droplet_list_kernels_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_neighbors_response.py b/src/gradientai/types/gpu_droplet_list_neighbors_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_neighbors_response.py
rename to src/gradientai/types/gpu_droplet_list_neighbors_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_params.py b/src/gradientai/types/gpu_droplet_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_params.py
rename to src/gradientai/types/gpu_droplet_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_response.py b/src/gradientai/types/gpu_droplet_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_response.py
rename to src/gradientai/types/gpu_droplet_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_snapshots_params.py b/src/gradientai/types/gpu_droplet_list_snapshots_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_snapshots_params.py
rename to src/gradientai/types/gpu_droplet_list_snapshots_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_snapshots_response.py b/src/gradientai/types/gpu_droplet_list_snapshots_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_snapshots_response.py
rename to src/gradientai/types/gpu_droplet_list_snapshots_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_retrieve_response.py b/src/gradientai/types/gpu_droplet_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_retrieve_response.py
rename to src/gradientai/types/gpu_droplet_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/__init__.py b/src/gradientai/types/gpu_droplets/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/__init__.py
rename to src/gradientai/types/gpu_droplets/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/__init__.py b/src/gradientai/types/gpu_droplets/account/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/__init__.py
rename to src/gradientai/types/gpu_droplets/account/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_create_params.py b/src/gradientai/types/gpu_droplets/account/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_create_params.py
rename to src/gradientai/types/gpu_droplets/account/key_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_create_response.py b/src/gradientai/types/gpu_droplets/account/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_create_response.py
rename to src/gradientai/types/gpu_droplets/account/key_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_list_params.py b/src/gradientai/types/gpu_droplets/account/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_list_params.py
rename to src/gradientai/types/gpu_droplets/account/key_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_list_response.py b/src/gradientai/types/gpu_droplets/account/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_list_response.py
rename to src/gradientai/types/gpu_droplets/account/key_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/account/key_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_update_params.py b/src/gradientai/types/gpu_droplets/account/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_update_params.py
rename to src/gradientai/types/gpu_droplets/account/key_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_update_response.py b/src/gradientai/types/gpu_droplets/account/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_update_response.py
rename to src/gradientai/types/gpu_droplets/account/key_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py
rename to src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py
rename to src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_initiate_params.py b/src/gradientai/types/gpu_droplets/action_initiate_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_initiate_params.py
rename to src/gradientai/types/gpu_droplets/action_initiate_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_initiate_response.py b/src/gradientai/types/gpu_droplets/action_initiate_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_initiate_response.py
rename to src/gradientai/types/gpu_droplets/action_initiate_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_list_params.py b/src/gradientai/types/gpu_droplets/action_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_list_params.py
rename to src/gradientai/types/gpu_droplets/action_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_list_response.py b/src/gradientai/types/gpu_droplets/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_list_response.py
rename to src/gradientai/types/gpu_droplets/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/associated_resource.py b/src/gradientai/types/gpu_droplets/associated_resource.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/associated_resource.py
rename to src/gradientai/types/gpu_droplets/associated_resource.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_create_params.py b/src/gradientai/types/gpu_droplets/autoscale_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_create_params.py
rename to src/gradientai/types/gpu_droplets/autoscale_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_create_response.py b/src/gradientai/types/gpu_droplets/autoscale_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_create_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_history_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_history_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_members_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_members_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_params.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool.py b/src/gradientai/types/gpu_droplets/autoscale_pool.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
rename to src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_update_params.py b/src/gradientai/types/gpu_droplets/autoscale_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_update_params.py
rename to src/gradientai/types/gpu_droplets/autoscale_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_update_response.py b/src/gradientai/types/gpu_droplets/autoscale_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_update_response.py
rename to src/gradientai/types/gpu_droplets/autoscale_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_params.py b/src/gradientai/types/gpu_droplets/backup_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_params.py
rename to src/gradientai/types/gpu_droplets/backup_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/gradientai/types/gpu_droplets/backup_list_policies_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py
rename to src/gradientai/types/gpu_droplets/backup_list_policies_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_policies_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py
rename to src/gradientai/types/gpu_droplets/backup_list_policies_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_response.py b/src/gradientai/types/gpu_droplets/backup_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_response.py
rename to src/gradientai/types/gpu_droplets/backup_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
rename to src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
rename to src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/current_utilization.py b/src/gradientai/types/gpu_droplets/current_utilization.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/current_utilization.py
rename to src/gradientai/types/gpu_droplets/current_utilization.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
rename to src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
rename to src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
rename to src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py
rename to src/gradientai/types/gpu_droplets/destroyed_associated_resource.py
diff --git a/src/do_gradientai/types/gpu_droplets/domains.py b/src/gradientai/types/gpu_droplets/domains.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/domains.py
rename to src/gradientai/types/gpu_droplets/domains.py
diff --git a/src/do_gradientai/types/gpu_droplets/domains_param.py b/src/gradientai/types/gpu_droplets/domains_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/domains_param.py
rename to src/gradientai/types/gpu_droplets/domains_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall.py b/src/gradientai/types/gpu_droplets/firewall.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall.py
rename to src/gradientai/types/gpu_droplets/firewall.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_create_params.py b/src/gradientai/types/gpu_droplets/firewall_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_create_params.py
rename to src/gradientai/types/gpu_droplets/firewall_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_create_response.py b/src/gradientai/types/gpu_droplets/firewall_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_create_response.py
rename to src/gradientai/types/gpu_droplets/firewall_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_list_params.py b/src/gradientai/types/gpu_droplets/firewall_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_list_params.py
rename to src/gradientai/types/gpu_droplets/firewall_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_list_response.py b/src/gradientai/types/gpu_droplets/firewall_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_list_response.py
rename to src/gradientai/types/gpu_droplets/firewall_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_param.py b/src/gradientai/types/gpu_droplets/firewall_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_param.py
rename to src/gradientai/types/gpu_droplets/firewall_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/firewall_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_update_params.py b/src/gradientai/types/gpu_droplets/firewall_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_update_params.py
rename to src/gradientai/types/gpu_droplets/firewall_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_update_response.py b/src/gradientai/types/gpu_droplets/firewall_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_update_response.py
rename to src/gradientai/types/gpu_droplets/firewall_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/__init__.py b/src/gradientai/types/gpu_droplets/firewalls/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/__init__.py
rename to src/gradientai/types/gpu_droplets/firewalls/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
rename to src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip.py b/src/gradientai/types/gpu_droplets/floating_ip.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip.py
rename to src/gradientai/types/gpu_droplets/floating_ip.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/gradientai/types/gpu_droplets/floating_ip_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py
rename to src/gradientai/types/gpu_droplets/floating_ip_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/gradientai/types/gpu_droplets/floating_ip_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py
rename to src/gradientai/types/gpu_droplets/floating_ip_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/gradientai/types/gpu_droplets/floating_ip_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py
rename to src/gradientai/types/gpu_droplets/floating_ip_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/gradientai/types/gpu_droplets/floating_ip_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py
rename to src/gradientai/types/gpu_droplets/floating_ip_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/gradientai/types/gpu_droplets/floating_ips/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py
rename to src/gradientai/types/gpu_droplets/floating_ips/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py
rename to src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py
rename to src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py
rename to src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/forwarding_rule.py b/src/gradientai/types/gpu_droplets/forwarding_rule.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/forwarding_rule.py
rename to src/gradientai/types/gpu_droplets/forwarding_rule.py
diff --git a/src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/gradientai/types/gpu_droplets/forwarding_rule_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py
rename to src/gradientai/types/gpu_droplets/forwarding_rule_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/glb_settings.py b/src/gradientai/types/gpu_droplets/glb_settings.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/glb_settings.py
rename to src/gradientai/types/gpu_droplets/glb_settings.py
diff --git a/src/do_gradientai/types/gpu_droplets/glb_settings_param.py b/src/gradientai/types/gpu_droplets/glb_settings_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/glb_settings_param.py
rename to src/gradientai/types/gpu_droplets/glb_settings_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/health_check.py b/src/gradientai/types/gpu_droplets/health_check.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/health_check.py
rename to src/gradientai/types/gpu_droplets/health_check.py
diff --git a/src/do_gradientai/types/gpu_droplets/health_check_param.py b/src/gradientai/types/gpu_droplets/health_check_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/health_check_param.py
rename to src/gradientai/types/gpu_droplets/health_check_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_create_params.py b/src/gradientai/types/gpu_droplets/image_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_create_params.py
rename to src/gradientai/types/gpu_droplets/image_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_create_response.py b/src/gradientai/types/gpu_droplets/image_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_create_response.py
rename to src/gradientai/types/gpu_droplets/image_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_list_params.py b/src/gradientai/types/gpu_droplets/image_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_list_params.py
rename to src/gradientai/types/gpu_droplets/image_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_list_response.py b/src/gradientai/types/gpu_droplets/image_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_list_response.py
rename to src/gradientai/types/gpu_droplets/image_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_retrieve_response.py b/src/gradientai/types/gpu_droplets/image_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/image_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_update_params.py b/src/gradientai/types/gpu_droplets/image_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_update_params.py
rename to src/gradientai/types/gpu_droplets/image_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_update_response.py b/src/gradientai/types/gpu_droplets/image_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_update_response.py
rename to src/gradientai/types/gpu_droplets/image_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/__init__.py b/src/gradientai/types/gpu_droplets/images/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/__init__.py
rename to src/gradientai/types/gpu_droplets/images/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/action_create_params.py b/src/gradientai/types/gpu_droplets/images/action_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/action_create_params.py
rename to src/gradientai/types/gpu_droplets/images/action_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/action_list_response.py b/src/gradientai/types/gpu_droplets/images/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/action_list_response.py
rename to src/gradientai/types/gpu_droplets/images/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/lb_firewall.py b/src/gradientai/types/gpu_droplets/lb_firewall.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/lb_firewall.py
rename to src/gradientai/types/gpu_droplets/lb_firewall.py
diff --git a/src/do_gradientai/types/gpu_droplets/lb_firewall_param.py b/src/gradientai/types/gpu_droplets/lb_firewall_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/lb_firewall_param.py
rename to src/gradientai/types/gpu_droplets/lb_firewall_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer.py b/src/gradientai/types/gpu_droplets/load_balancer.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer.py
rename to src/gradientai/types/gpu_droplets/load_balancer.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/gradientai/types/gpu_droplets/load_balancer_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py
rename to src/gradientai/types/gpu_droplets/load_balancer_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/gradientai/types/gpu_droplets/load_balancer_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py
rename to src/gradientai/types/gpu_droplets/load_balancer_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/gradientai/types/gpu_droplets/load_balancer_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py
rename to src/gradientai/types/gpu_droplets/load_balancer_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/gradientai/types/gpu_droplets/load_balancer_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py
rename to src/gradientai/types/gpu_droplets/load_balancer_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/gradientai/types/gpu_droplets/load_balancer_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py
rename to src/gradientai/types/gpu_droplets/load_balancer_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/gradientai/types/gpu_droplets/load_balancer_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py
rename to src/gradientai/types/gpu_droplets/load_balancer_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/gradientai/types/gpu_droplets/load_balancers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py
rename to src/gradientai/types/gpu_droplets/load_balancers/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
rename to src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
rename to src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
rename to src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
rename to src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/size_list_params.py b/src/gradientai/types/gpu_droplets/size_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/size_list_params.py
rename to src/gradientai/types/gpu_droplets/size_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/size_list_response.py b/src/gradientai/types/gpu_droplets/size_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/size_list_response.py
rename to src/gradientai/types/gpu_droplets/size_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/snapshot_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_list_params.py
rename to src/gradientai/types/gpu_droplets/snapshot_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/snapshot_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_list_response.py
rename to src/gradientai/types/gpu_droplets/snapshot_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/sticky_sessions.py b/src/gradientai/types/gpu_droplets/sticky_sessions.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/sticky_sessions.py
rename to src/gradientai/types/gpu_droplets/sticky_sessions.py
diff --git a/src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/gradientai/types/gpu_droplets/sticky_sessions_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py
rename to src/gradientai/types/gpu_droplets/sticky_sessions_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_create_params.py b/src/gradientai/types/gpu_droplets/volume_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_create_params.py
rename to src/gradientai/types/gpu_droplets/volume_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_create_response.py b/src/gradientai/types/gpu_droplets/volume_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_create_response.py
rename to src/gradientai/types/gpu_droplets/volume_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py
rename to src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_list_params.py b/src/gradientai/types/gpu_droplets/volume_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_list_params.py
rename to src/gradientai/types/gpu_droplets/volume_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_list_response.py b/src/gradientai/types/gpu_droplets/volume_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_list_response.py
rename to src/gradientai/types/gpu_droplets/volume_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/gradientai/types/gpu_droplets/volume_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/volume_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/__init__.py b/src/gradientai/types/gpu_droplets/volumes/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/__init__.py
rename to src/gradientai/types/gpu_droplets/volumes/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
rename to src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
rename to src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
rename to src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
rename to src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/gradientai/types/gpu_droplets/volumes/action_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py
rename to src/gradientai/types/gpu_droplets/volumes/action_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/gradientai/types/gpu_droplets/volumes/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py
rename to src/gradientai/types/gpu_droplets/volumes/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
rename to src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
rename to src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
rename to src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
rename to src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
rename to src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py
similarity index 100%
rename from
src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py rename to src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py rename to src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py diff --git a/src/do_gradientai/types/gpu_droplets/volumes/volume_action.py b/src/gradientai/types/gpu_droplets/volumes/volume_action.py similarity index 100% rename from src/do_gradientai/types/gpu_droplets/volumes/volume_action.py rename to src/gradientai/types/gpu_droplets/volumes/volume_action.py diff --git a/src/do_gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py similarity index 100% rename from src/do_gradientai/types/inference/__init__.py rename to src/gradientai/types/inference/__init__.py diff --git a/src/do_gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_params.py rename to src/gradientai/types/inference/api_key_create_params.py diff --git a/src/do_gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_response.py rename to src/gradientai/types/inference/api_key_create_response.py diff --git a/src/do_gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_delete_response.py rename to src/gradientai/types/inference/api_key_delete_response.py diff --git a/src/do_gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_params.py rename to src/gradientai/types/inference/api_key_list_params.py diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_response.py rename to src/gradientai/types/inference/api_key_list_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_params.py rename to src/gradientai/types/inference/api_key_update_params.py diff --git a/src/do_gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_regenerate_response.py rename to src/gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_response.py rename to src/gradientai/types/inference/api_key_update_response.py diff --git a/src/do_gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py 
similarity index 100%
rename from src/do_gradientai/types/inference/api_model_api_key_info.py
rename to src/gradientai/types/inference/api_model_api_key_info.py
diff --git a/src/do_gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_create_params.py
rename to src/gradientai/types/knowledge_base_create_params.py
diff --git a/src/do_gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_create_response.py
rename to src/gradientai/types/knowledge_base_create_response.py
diff --git a/src/do_gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_delete_response.py
rename to src/gradientai/types/knowledge_base_delete_response.py
diff --git a/src/do_gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_list_params.py
rename to src/gradientai/types/knowledge_base_list_params.py
diff --git a/src/do_gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_list_response.py
rename to src/gradientai/types/knowledge_base_list_response.py
diff --git a/src/do_gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_retrieve_response.py
rename to src/gradientai/types/knowledge_base_retrieve_response.py
diff --git a/src/do_gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_update_params.py
rename to src/gradientai/types/knowledge_base_update_params.py
diff --git a/src/do_gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_update_response.py
rename to src/gradientai/types/knowledge_base_update_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/__init__.py
rename to src/gradientai/types/knowledge_bases/__init__.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
rename to src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
rename to src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
rename to src/gradientai/types/knowledge_bases/api_indexed_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_indexing_job.py
rename to src/gradientai/types/knowledge_bases/api_indexing_job.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
rename to src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
rename to src/gradientai/types/knowledge_bases/api_spaces_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
rename to src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
rename to src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
rename to src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
rename to src/gradientai/types/knowledge_bases/aws_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_create_params.py
rename to src/gradientai/types/knowledge_bases/data_source_create_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_create_response.py
rename to src/gradientai/types/knowledge_bases/data_source_create_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_delete_response.py
rename to src/gradientai/types/knowledge_bases/data_source_delete_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_list_params.py
rename to src/gradientai/types/knowledge_bases/data_source_list_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_list_response.py
rename to src/gradientai/types/knowledge_bases/data_source_list_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py
rename to src/gradientai/types/knowledge_bases/indexing_job_create_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py
rename to src/gradientai/types/knowledge_bases/indexing_job_create_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py
rename to src/gradientai/types/knowledge_bases/indexing_job_list_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
rename to src/gradientai/types/knowledge_bases/indexing_job_list_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
diff --git a/src/do_gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py
similarity index 100%
rename from src/do_gradientai/types/model_list_response.py
rename to src/gradientai/types/model_list_response.py
diff --git a/src/do_gradientai/types/model_retrieve_response.py b/src/gradientai/types/model_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/model_retrieve_response.py
rename to src/gradientai/types/model_retrieve_response.py
diff --git a/src/do_gradientai/types/models/__init__.py b/src/gradientai/types/models/__init__.py
similarity index 100%
rename from src/do_gradientai/types/models/__init__.py
rename to src/gradientai/types/models/__init__.py
diff --git a/src/do_gradientai/types/models/providers/__init__.py b/src/gradientai/types/models/providers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/__init__.py
rename to src/gradientai/types/models/providers/__init__.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_create_params.py
rename to src/gradientai/types/models/providers/anthropic_create_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_create_response.py
rename to src/gradientai/types/models/providers/anthropic_create_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_delete_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_delete_response.py
rename to src/gradientai/types/models/providers/anthropic_delete_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py b/src/gradientai/types/models/providers/anthropic_list_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
rename to src/gradientai/types/models/providers/anthropic_list_agents_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
rename to src/gradientai/types/models/providers/anthropic_list_agents_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_params.py
rename to src/gradientai/types/models/providers/anthropic_list_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_response.py
rename to src/gradientai/types/models/providers/anthropic_list_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
rename to src/gradientai/types/models/providers/anthropic_retrieve_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_update_params.py
rename to src/gradientai/types/models/providers/anthropic_update_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_update_response.py
rename to src/gradientai/types/models/providers/anthropic_update_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_create_params.py
rename to src/gradientai/types/models/providers/openai_create_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_create_response.py
rename to src/gradientai/types/models/providers/openai_create_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_delete_response.py
rename to src/gradientai/types/models/providers/openai_delete_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_list_params.py b/src/gradientai/types/models/providers/openai_list_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_list_params.py
rename to src/gradientai/types/models/providers/openai_list_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_list_response.py
rename to src/gradientai/types/models/providers/openai_list_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
rename to src/gradientai/types/models/providers/openai_retrieve_agents_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
rename to src/gradientai/types/models/providers/openai_retrieve_agents_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_response.py
rename to src/gradientai/types/models/providers/openai_retrieve_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_update_params.py
rename to src/gradientai/types/models/providers/openai_update_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_update_response.py b/src/gradientai/types/models/providers/openai_update_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_update_response.py
rename to src/gradientai/types/models/providers/openai_update_response.py
diff --git a/src/do_gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py
similarity index 100%
rename from src/do_gradientai/types/region_list_params.py
rename to src/gradientai/types/region_list_params.py
diff --git a/src/do_gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py
similarity index 100%
rename from src/do_gradientai/types/region_list_response.py
rename to src/gradientai/types/region_list_response.py
diff --git a/src/do_gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py
similarity index 100%
rename from src/do_gradientai/types/shared/__init__.py
rename to src/gradientai/types/shared/__init__.py
diff --git a/src/do_gradientai/types/shared/action.py b/src/gradientai/types/shared/action.py
similarity index 100%
rename from src/do_gradientai/types/shared/action.py
rename to src/gradientai/types/shared/action.py
diff --git a/src/do_gradientai/types/shared/action_link.py b/src/gradientai/types/shared/action_link.py
similarity index 100%
rename from src/do_gradientai/types/shared/action_link.py
rename to src/gradientai/types/shared/action_link.py
diff --git a/src/do_gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/api_links.py
rename to src/gradientai/types/shared/api_links.py
diff --git a/src/do_gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py
similarity index 100%
rename from src/do_gradientai/types/shared/api_meta.py
rename to src/gradientai/types/shared/api_meta.py
diff --git a/src/do_gradientai/types/shared/backward_links.py b/src/gradientai/types/shared/backward_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/backward_links.py
rename to src/gradientai/types/shared/backward_links.py
diff --git a/src/do_gradientai/types/shared/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py
similarity index 100%
rename from src/do_gradientai/types/shared/chat_completion_chunk.py
rename to src/gradientai/types/shared/chat_completion_chunk.py
diff --git a/src/do_gradientai/types/shared/chat_completion_token_logprob.py b/src/gradientai/types/shared/chat_completion_token_logprob.py
similarity index 100%
rename from src/do_gradientai/types/shared/chat_completion_token_logprob.py
rename to src/gradientai/types/shared/chat_completion_token_logprob.py
diff --git a/src/do_gradientai/types/shared/completion_usage.py b/src/gradientai/types/shared/completion_usage.py
similarity index 100%
rename from src/do_gradientai/types/shared/completion_usage.py
rename to src/gradientai/types/shared/completion_usage.py
diff --git a/src/do_gradientai/types/shared/disk_info.py b/src/gradientai/types/shared/disk_info.py
similarity index 100%
rename from src/do_gradientai/types/shared/disk_info.py
rename to src/gradientai/types/shared/disk_info.py
diff --git a/src/do_gradientai/types/shared/droplet.py b/src/gradientai/types/shared/droplet.py
similarity index 100%
rename from src/do_gradientai/types/shared/droplet.py
rename to src/gradientai/types/shared/droplet.py
diff --git a/src/do_gradientai/types/shared/droplet_next_backup_window.py b/src/gradientai/types/shared/droplet_next_backup_window.py
similarity index 100%
rename from src/do_gradientai/types/shared/droplet_next_backup_window.py
rename to src/gradientai/types/shared/droplet_next_backup_window.py
diff --git a/src/do_gradientai/types/shared/firewall_rule_target.py b/src/gradientai/types/shared/firewall_rule_target.py
similarity index 100%
rename from src/do_gradientai/types/shared/firewall_rule_target.py
rename to src/gradientai/types/shared/firewall_rule_target.py
diff --git a/src/do_gradientai/types/shared/forward_links.py b/src/gradientai/types/shared/forward_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/forward_links.py
rename to src/gradientai/types/shared/forward_links.py
diff --git a/src/do_gradientai/types/shared/garbage_collection.py b/src/gradientai/types/shared/garbage_collection.py
similarity index 100%
rename from src/do_gradientai/types/shared/garbage_collection.py
rename to src/gradientai/types/shared/garbage_collection.py
diff --git a/src/do_gradientai/types/shared/gpu_info.py b/src/gradientai/types/shared/gpu_info.py
similarity index 100%
rename from src/do_gradientai/types/shared/gpu_info.py
rename to src/gradientai/types/shared/gpu_info.py
diff --git a/src/do_gradientai/types/shared/image.py b/src/gradientai/types/shared/image.py
similarity index 100%
rename from src/do_gradientai/types/shared/image.py
rename to src/gradientai/types/shared/image.py
diff --git a/src/do_gradientai/types/shared/kernel.py b/src/gradientai/types/shared/kernel.py
similarity index 100%
rename from src/do_gradientai/types/shared/kernel.py
rename to src/gradientai/types/shared/kernel.py
diff --git a/src/do_gradientai/types/shared/meta_properties.py b/src/gradientai/types/shared/meta_properties.py
similarity index 100%
rename from src/do_gradientai/types/shared/meta_properties.py
rename to src/gradientai/types/shared/meta_properties.py
diff --git a/src/do_gradientai/types/shared/network_v4.py b/src/gradientai/types/shared/network_v4.py
similarity index 100%
rename from src/do_gradientai/types/shared/network_v4.py
rename to src/gradientai/types/shared/network_v4.py
diff --git a/src/do_gradientai/types/shared/network_v6.py b/src/gradientai/types/shared/network_v6.py
similarity index 100%
rename from src/do_gradientai/types/shared/network_v6.py
rename to src/gradientai/types/shared/network_v6.py
diff --git a/src/do_gradientai/types/shared/page_links.py b/src/gradientai/types/shared/page_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/page_links.py
rename to src/gradientai/types/shared/page_links.py
diff --git a/src/do_gradientai/types/shared/region.py b/src/gradientai/types/shared/region.py
similarity index 100%
rename from src/do_gradientai/types/shared/region.py
rename to src/gradientai/types/shared/region.py
diff --git a/src/do_gradientai/types/shared/size.py b/src/gradientai/types/shared/size.py
similarity index 100%
rename from src/do_gradientai/types/shared/size.py
rename to src/gradientai/types/shared/size.py
diff --git a/src/do_gradientai/types/shared/snapshots.py b/src/gradientai/types/shared/snapshots.py
similarity index 100%
rename from src/do_gradientai/types/shared/snapshots.py
rename to src/gradientai/types/shared/snapshots.py
diff --git a/src/do_gradientai/types/shared/subscription.py b/src/gradientai/types/shared/subscription.py
similarity index 100%
rename from src/do_gradientai/types/shared/subscription.py
rename to src/gradientai/types/shared/subscription.py
diff --git a/src/do_gradientai/types/shared/subscription_tier_base.py b/src/gradientai/types/shared/subscription_tier_base.py
similarity index 100%
rename from src/do_gradientai/types/shared/subscription_tier_base.py
rename to src/gradientai/types/shared/subscription_tier_base.py
diff --git a/src/do_gradientai/types/shared/vpc_peering.py b/src/gradientai/types/shared/vpc_peering.py
similarity index 100%
rename from src/do_gradientai/types/shared/vpc_peering.py
rename to src/gradientai/types/shared/vpc_peering.py
diff --git a/src/do_gradientai/types/shared_params/__init__.py b/src/gradientai/types/shared_params/__init__.py
similarity index 100%
rename from src/do_gradientai/types/shared_params/__init__.py
rename to src/gradientai/types/shared_params/__init__.py
diff --git a/src/do_gradientai/types/shared_params/firewall_rule_target.py b/src/gradientai/types/shared_params/firewall_rule_target.py
similarity index 100%
rename from src/do_gradientai/types/shared_params/firewall_rule_target.py
rename to src/gradientai/types/shared_params/firewall_rule_target.py
diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py
index d80b5c09..dc13cd85 100644
--- a/tests/api_resources/agents/chat/test_completions.py
+++ b/tests/api_resources/agents/chat/test_completions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents.chat import CompletionCreateResponse
+from gradientai.types.agents.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py
index 27ab4a27..6b8f8bc7 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_models.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_models.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
+from gradientai.types.agents.evaluation_metrics import ModelListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
index 2728393e..ea39c474 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents.evaluation_metrics import (
+from gradientai.types.agents.evaluation_metrics import (
     WorkspaceListResponse,
     WorkspaceCreateResponse,
     WorkspaceDeleteResponse,
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
index 37d39018..635721b3 100644
--- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents.evaluation_metrics.workspaces import (
+from gradientai.types.agents.evaluation_metrics.workspaces import (
     AgentListResponse,
     AgentMoveResponse,
 )
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
index 1e5275fe..c29511f5 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
index 56edd598..0413591e 100644
--- a/tests/api_resources/agents/test_evaluation_datasets.py
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
index 303d85d6..d64367ae 100644
--- a/tests/api_resources/agents/test_evaluation_metrics.py
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationMetricListResponse,
     EvaluationMetricListRegionsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index 9d443f16..2ea44e6b 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationRunCreateResponse,
     EvaluationRunRetrieveResponse,
     EvaluationRunListResultsResponse,
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index ae986abc..e9083ba3 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     EvaluationTestCaseListResponse,
     EvaluationTestCaseCreateResponse,
     EvaluationTestCaseUpdateResponse,
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
index 624446e0..4390d1d2 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/agents/test_functions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionDeleteResponse,
     FunctionUpdateResponse,
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index 7ac99316..2ac20d89 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index 256a4757..d04e8c90 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     RouteAddResponse,
     RouteViewResponse,
     RouteDeleteResponse,
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
index 158856ed..d6151470 100644
--- a/tests/api_resources/agents/test_versions.py
+++ b/tests/api_resources/agents/test_versions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradientai.types.agents import (
     VersionListResponse,
     VersionUpdateResponse,
 )
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 95b02106..46c8b431 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.chat import CompletionCreateResponse
+from gradientai.types.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py
index cf168f61..acad3575 100644
--- a/tests/api_resources/gpu_droplets/account/test_keys.py
+++ b/tests/api_resources/gpu_droplets/account/test_keys.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.account import (
+from gradientai.types.gpu_droplets.account import (
     KeyListResponse,
     KeyCreateResponse,
     KeyUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
index 819a5e6e..67d132aa 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
index b2eab40c..446a11af 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
index 25c9362b..a0227c61 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
index ad26db8a..82a12d2e 100644
--- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
+++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.floating_ips import (
+from gradientai.types.gpu_droplets.floating_ips import (
     ActionListResponse,
     ActionCreateResponse,
     ActionRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py
index 35861bcb..4d59c85b 100644
--- a/tests/api_resources/gpu_droplets/images/test_actions.py
+++ b/tests/api_resources/gpu_droplets/images/test_actions.py
@@ -7,10 +7,10 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.shared import Action
-from do_gradientai.types.gpu_droplets.images import ActionListResponse
+from gradientai.types.shared import Action
+from gradientai.types.gpu_droplets.images import ActionListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
index f22213e2..333567f4 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
index d53bd0db..ec6f7838 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py
index 74e45b44..5e443dd8 100644
--- a/tests/api_resources/gpu_droplets/test_actions.py
+++ b/tests/api_resources/gpu_droplets/test_actions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     ActionListResponse,
     ActionInitiateResponse,
     ActionRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py
index cec0371d..42164666 100644
--- a/tests/api_resources/gpu_droplets/test_autoscale.py
+++ b/tests/api_resources/gpu_droplets/test_autoscale.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     AutoscaleListResponse,
     AutoscaleCreateResponse,
     AutoscaleUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py
index 334c701f..f8f72140 100644
--- a/tests/api_resources/gpu_droplets/test_backups.py
+++ b/tests/api_resources/gpu_droplets/test_backups.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     BackupListResponse,
     BackupListPoliciesResponse,
     BackupRetrievePolicyResponse,
diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
index 2aef1fce..b6922feb 100644
--- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
+++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     DestroyWithAssociatedResourceListResponse,
     DestroyWithAssociatedResourceCheckStatusResponse,
 )
diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py
index 6d98ebe8..537fe7d2 100644
--- a/tests/api_resources/gpu_droplets/test_firewalls.py
+++ b/tests/api_resources/gpu_droplets/test_firewalls.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     FirewallListResponse,
     FirewallCreateResponse,
     FirewallUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py
index 9b8b3183..830e9b39 100644
--- a/tests/api_resources/gpu_droplets/test_floating_ips.py
+++ b/tests/api_resources/gpu_droplets/test_floating_ips.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     FloatingIPListResponse,
     FloatingIPCreateResponse,
     FloatingIPRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py
index 5a2a7c0c..7be6a786 100644
--- a/tests/api_resources/gpu_droplets/test_images.py
+++ b/tests/api_resources/gpu_droplets/test_images.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     ImageListResponse,
     ImageCreateResponse,
     ImageUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py
index b96c6d52..c1ce1ce2 100644
--- a/tests/api_resources/gpu_droplets/test_load_balancers.py
+++ b/tests/api_resources/gpu_droplets/test_load_balancers.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     LoadBalancerListResponse,
     LoadBalancerCreateResponse,
     LoadBalancerUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py
index 1ff11cd7..eda73b1e 100644
--- a/tests/api_resources/gpu_droplets/test_sizes.py
+++ b/tests/api_resources/gpu_droplets/test_sizes.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import SizeListResponse
+from gradientai.types.gpu_droplets import SizeListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py
index 413dd993..5d7132c2 100644
--- a/tests/api_resources/gpu_droplets/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/test_snapshots.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse
+from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py
index baf6b430..64bcb4c5 100644
--- a/tests/api_resources/gpu_droplets/test_volumes.py
+++ b/tests/api_resources/gpu_droplets/test_volumes.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradientai.types.gpu_droplets import (
     VolumeListResponse,
     VolumeCreateResponse,
     VolumeRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py
index 40d9b4eb..d5338c97 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_actions.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradientai.types.gpu_droplets.volumes import (
     ActionListResponse,
     ActionRetrieveResponse,
     ActionInitiateByIDResponse,
diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
index 4884d372..8b72305c 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradientai.types.gpu_droplets.volumes import (
     SnapshotListResponse,
     SnapshotCreateResponse,
     SnapshotRetrieveResponse,
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
index 85ad49da..157a2e3d 100644
--- a/tests/api_resources/inference/test_api_keys.py
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.inference import (
+from gradientai.types.inference import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
index ebb0841a..55b056b8 100644
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.knowledge_bases import (
+from gradientai.types.knowledge_bases import (
     DataSourceListResponse,
     DataSourceCreateResponse,
     DataSourceDeleteResponse,
diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
index b0185941..ed32d7f8 100644
--- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py
+++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.knowledge_bases import (
+from gradientai.types.knowledge_bases import (
     IndexingJobListResponse,
     IndexingJobCreateResponse,
     IndexingJobRetrieveResponse,
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
index 6b3d99a3..c61a97ea 100644
--- a/tests/api_resources/models/providers/test_anthropic.py
+++ b/tests/api_resources/models/providers/test_anthropic.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.models.providers import (
+from gradientai.types.models.providers import (
     AnthropicListResponse,
     AnthropicCreateResponse,
     AnthropicDeleteResponse,
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
index bdde97ca..7fde1a69 100644
--- a/tests/api_resources/models/providers/test_openai.py
+++ b/tests/api_resources/models/providers/test_openai.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.models.providers import (
+from gradientai.types.models.providers import (
     OpenAIListResponse,
     OpenAICreateResponse,
     OpenAIDeleteResponse,
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 2f68a06f..8a6a7d69 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import (
+from gradientai.types import (
     AgentListResponse,
     AgentCreateResponse,
     AgentDeleteResponse,
diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py
index cbc7e63b..22f3d2d0 100644
--- a/tests/api_resources/test_gpu_droplets.py
+++ b/tests/api_resources/test_gpu_droplets.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import (
+from gradientai.types import (
     GPUDropletListResponse,
     GPUDropletCreateResponse,
     GPUDropletRetrieveResponse,
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index c4d179cc..8a331b52 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import (
+from gradientai.types import (
     KnowledgeBaseListResponse,
     KnowledgeBaseCreateResponse,
     KnowledgeBaseDeleteResponse,
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index 803c5d5a..fe837973 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import ModelListResponse, ModelRetrieveResponse
+from gradientai.types import ModelListResponse, ModelRetrieveResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
index f331342e..4f232293 100644
--- a/tests/api_resources/test_regions.py
+++ b/tests/api_resources/test_regions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import RegionListResponse
+from gradientai.types import RegionListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
diff --git a/tests/conftest.py b/tests/conftest.py
index 1e102b94..5b24e1c2 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,15 +10,15 @@
 
 import pytest
 from pytest_asyncio import is_async_test
 
-from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
-from do_gradientai._utils import is_dict
+from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
+from gradientai._utils import is_dict
 
 if TYPE_CHECKING:
     from _pytest.fixtures import FixtureRequest  # pyright: ignore[reportPrivateImportUsage]
 
 pytest.register_assert_rewrite("tests.utils")
 
-logging.getLogger("do_gradientai").setLevel(logging.DEBUG)
+logging.getLogger("gradientai").setLevel(logging.DEBUG)
 
 # automatically add `pytest.mark.asyncio()` to all of our async tests
diff --git a/tests/test_client.py b/tests/test_client.py
index 85b7d31a..61013a0a 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -21,12 +21,12 @@
 from respx import MockRouter
 from pydantic import ValidationError
 
-from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
-from do_gradientai._types import Omit
-from do_gradientai._models import BaseModel, FinalRequestOptions
-from do_gradientai._streaming import Stream, AsyncStream
-from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
-from do_gradientai._base_client import (
+from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
+from gradientai._types import Omit
+from gradientai._models import BaseModel, FinalRequestOptions
+from gradientai._streaming import Stream, AsyncStream
+from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
+from gradientai._base_client import (
     DEFAULT_TIMEOUT,
     HTTPX_DEFAULT_TIMEOUT,
     BaseClient,
@@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
         # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
         #
         # removing the decorator fixes the leak for reasons we don't understand.
-        "do_gradientai/_legacy_response.py",
-        "do_gradientai/_response.py",
+        "gradientai/_legacy_response.py",
+        "gradientai/_response.py",
         # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
-        "do_gradientai/_compat.py",
+        "gradientai/_compat.py",
         # Standard library leaks we don't care about.
"/logging/__init__.py", ] @@ -873,7 +873,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) @@ -909,7 +909,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -948,7 +948,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "do_gradientai/_legacy_response.py", - "do_gradientai/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. 
- "do_gradientai/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1880,7 +1880,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1900,7 +1900,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1920,7 +1920,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1960,7 +1960,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1993,7 +1993,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from do_gradientai._utils import asyncify - from do_gradientai._base_client import get_platform + from gradientai._utils import asyncify + from gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 5a98ce1b..9d1579a8 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from do_gradientai._utils import deepcopy_minimal +from gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py 
b/tests/test_extract_files.py index 341e65ae..2905d59c 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from do_gradientai._types import FileTypes -from do_gradientai._utils import extract_files +from gradientai._types import FileTypes +from gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index ff7914bb..4a723313 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from do_gradientai._files import to_httpx_files, async_to_httpx_files +from gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index bfbef61a..3a857584 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from do_gradientai._utils import PropertyInfo -from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from do_gradientai._models import BaseModel, construct_type +from gradientai._utils import PropertyInfo +from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index c9213571..9080377b 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from do_gradientai._qs import Querystring, stringify +from gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 434e9491..c4e6b9d8 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from do_gradientai._utils import required_args +from gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 001ce776..1a8f241e 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from do_gradientai import BaseModel, GradientAI, AsyncGradientAI -from do_gradientai._response import ( +from gradientai import BaseModel, GradientAI, AsyncGradientAI +from gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from do_gradientai._streaming import Stream -from do_gradientai._base_client import FinalRequestOptions +from gradientai._streaming import Stream +from gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index c1ce8e85..cdb41a77 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from gradientai import GradientAI, AsyncGradientAI +from gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 30c06d6a..825fe048 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from do_gradientai._types import NOT_GIVEN, Base64FileInput -from do_gradientai._utils import ( +from gradientai._types import NOT_GIVEN, Base64FileInput +from gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from do_gradientai._compat import PYDANTIC_V2 -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2 +from gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 9ce2e0d3..3856b2c9 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from do_gradientai._utils import LazyProxy +from gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index c9129fdc..66ad064f 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from do_gradientai._utils import extract_type_var_from_base +from gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index 9def7c60..b539ed2c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from do_gradientai._types import Omit, NoneType -from do_gradientai._utils import ( +from gradientai._types import Omit, NoneType +from gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From 5f3d307dcb140048011d2b093055e9ce9105dc78 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 10:23:19 +0000 Subject: [PATCH 131/200] feat(api): manual updates --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 48 +- api.md | 418 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 
8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{gradientai => do_gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{gradientai => do_gradientai}/_client.py | 0 src/{gradientai => do_gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{gradientai => do_gradientai}/_files.py | 0 src/{gradientai => do_gradientai}/_models.py | 0 src/{gradientai => do_gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{gradientai => do_gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{gradientai => do_gradientai}/_version.py | 2 +- src/{gradientai => do_gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/chat/__init__.py | 0 .../resources/agents/chat/chat.py | 0 .../resources/agents/chat/completions.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../agents/evaluation_metrics/models.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 .../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/gpu_droplets/__init__.py | 0 .../gpu_droplets/account/__init__.py | 0 .../resources/gpu_droplets/account/account.py | 0 .../resources/gpu_droplets/account/keys.py | 0 .../resources/gpu_droplets/actions.py | 0 .../resources/gpu_droplets/autoscale.py | 0 .../resources/gpu_droplets/backups.py | 0 .../destroy_with_associated_resources.py | 0 .../gpu_droplets/firewalls/__init__.py | 0 .../gpu_droplets/firewalls/droplets.py | 0 .../gpu_droplets/firewalls/firewalls.py | 0 .../resources/gpu_droplets/firewalls/rules.py | 0 .../resources/gpu_droplets/firewalls/tags.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../gpu_droplets/floating_ips/actions.py | 0 .../gpu_droplets/floating_ips/floating_ips.py | 0 .../resources/gpu_droplets/gpu_droplets.py | 0 .../resources/gpu_droplets/images/__init__.py | 0 .../resources/gpu_droplets/images/actions.py | 0 .../resources/gpu_droplets/images/images.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../gpu_droplets/load_balancers/droplets.py | 0 .../load_balancers/forwarding_rules.py | 0 .../load_balancers/load_balancers.py | 0 .../resources/gpu_droplets/sizes.py | 0 .../resources/gpu_droplets/snapshots.py | 0 .../gpu_droplets/volumes/__init__.py | 0 .../resources/gpu_droplets/volumes/actions.py | 0 .../gpu_droplets/volumes/snapshots.py | 0 .../resources/gpu_droplets/volumes/volumes.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/models/__init__.py | 
0 .../resources/models/models.py | 0 .../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 0 .../resources/models/providers/openai.py | 0 .../resources/models/providers/providers.py | 0 .../resources/regions.py | 0 .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 ...evaluation_metric_list_regions_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 
.../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 .../types/agents/route_view_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/gpu_droplet_create_params.py | 0 .../types/gpu_droplet_create_response.py | 0 .../types/gpu_droplet_delete_by_tag_params.py | 0 .../gpu_droplet_list_firewalls_params.py | 0 .../gpu_droplet_list_firewalls_response.py | 0 .../types/gpu_droplet_list_kernels_params.py | 0 .../gpu_droplet_list_kernels_response.py | 0 .../gpu_droplet_list_neighbors_response.py | 0 .../types/gpu_droplet_list_params.py | 0 .../types/gpu_droplet_list_response.py | 0 .../gpu_droplet_list_snapshots_params.py | 0 .../gpu_droplet_list_snapshots_response.py | 0 .../types/gpu_droplet_retrieve_response.py | 0 .../types/gpu_droplets/__init__.py | 0 .../types/gpu_droplets/account/__init__.py | 0 .../gpu_droplets/account/key_create_params.py | 0 .../account/key_create_response.py | 0 .../gpu_droplets/account/key_list_params.py | 0 .../gpu_droplets/account/key_list_response.py | 0 .../account/key_retrieve_response.py | 0 .../gpu_droplets/account/key_update_params.py | 0 .../account/key_update_response.py | 0 .../action_bulk_initiate_params.py | 0 .../action_bulk_initiate_response.py | 0 .../gpu_droplets/action_initiate_params.py | 0 .../gpu_droplets/action_initiate_response.py | 0 .../types/gpu_droplets/action_list_params.py | 0 .../gpu_droplets/action_list_response.py | 0 .../gpu_droplets/action_retrieve_response.py | 0 .../types/gpu_droplets/associated_resource.py | 0 .../gpu_droplets/autoscale_create_params.py | 0 .../gpu_droplets/autoscale_create_response.py | 0 .../autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../gpu_droplets/autoscale_list_params.py | 0 .../gpu_droplets/autoscale_list_response.py | 0 .../types/gpu_droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../autoscale_retrieve_response.py | 0 .../gpu_droplets/autoscale_update_params.py | 0 .../gpu_droplets/autoscale_update_response.py | 0 .../types/gpu_droplets/backup_list_params.py | 0 .../backup_list_policies_params.py | 0 .../backup_list_policies_response.py | 0 .../gpu_droplets/backup_list_response.py | 0 ...backup_list_supported_policies_response.py | 0 
.../backup_retrieve_policy_response.py | 0 .../types/gpu_droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../destroyed_associated_resource.py | 0 .../types/gpu_droplets/domains.py | 0 .../types/gpu_droplets/domains_param.py | 0 .../types/gpu_droplets/firewall.py | 0 .../gpu_droplets/firewall_create_params.py | 0 .../gpu_droplets/firewall_create_response.py | 0 .../gpu_droplets/firewall_list_params.py | 0 .../gpu_droplets/firewall_list_response.py | 0 .../types/gpu_droplets/firewall_param.py | 0 .../firewall_retrieve_response.py | 0 .../gpu_droplets/firewall_update_params.py | 0 .../gpu_droplets/firewall_update_response.py | 0 .../types/gpu_droplets/firewalls/__init__.py | 0 .../firewalls/droplet_add_params.py | 0 .../firewalls/droplet_remove_params.py | 0 .../gpu_droplets/firewalls/rule_add_params.py | 0 .../firewalls/rule_remove_params.py | 0 .../gpu_droplets/firewalls/tag_add_params.py | 0 .../firewalls/tag_remove_params.py | 0 .../types/gpu_droplets/floating_ip.py | 0 .../gpu_droplets/floating_ip_create_params.py | 0 .../floating_ip_create_response.py | 0 .../gpu_droplets/floating_ip_list_params.py | 0 .../gpu_droplets/floating_ip_list_response.py | 0 .../floating_ip_retrieve_response.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/gpu_droplets/forwarding_rule.py | 0 .../gpu_droplets/forwarding_rule_param.py | 0 .../types/gpu_droplets/glb_settings.py | 0 .../types/gpu_droplets/glb_settings_param.py | 0 .../types/gpu_droplets/health_check.py | 0 .../types/gpu_droplets/health_check_param.py | 0 .../types/gpu_droplets/image_create_params.py | 0 .../gpu_droplets/image_create_response.py | 0 .../types/gpu_droplets/image_list_params.py | 0 .../types/gpu_droplets/image_list_response.py | 0 .../gpu_droplets/image_retrieve_response.py | 0 .../types/gpu_droplets/image_update_params.py | 0 .../gpu_droplets/image_update_response.py | 0 .../types/gpu_droplets/images/__init__.py | 0 .../images/action_create_params.py | 0 .../images/action_list_response.py | 0 .../types/gpu_droplets/lb_firewall.py | 0 .../types/gpu_droplets/lb_firewall_param.py | 0 .../types/gpu_droplets/load_balancer.py | 0 .../load_balancer_create_params.py | 0 .../load_balancer_create_response.py | 0 .../gpu_droplets/load_balancer_list_params.py | 0 .../load_balancer_list_response.py | 0 .../load_balancer_retrieve_response.py | 0 .../load_balancer_update_params.py | 0 .../load_balancer_update_response.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/gpu_droplets/size_list_params.py | 0 .../types/gpu_droplets/size_list_response.py | 0 .../gpu_droplets/snapshot_list_params.py | 0 .../gpu_droplets/snapshot_list_response.py | 0 .../snapshot_retrieve_response.py | 0 .../types/gpu_droplets/sticky_sessions.py | 0 .../gpu_droplets/sticky_sessions_param.py | 0 .../gpu_droplets/volume_create_params.py | 0 .../gpu_droplets/volume_create_response.py | 0 .../volume_delete_by_name_params.py | 0 .../types/gpu_droplets/volume_list_params.py | 0 .../gpu_droplets/volume_list_response.py | 0 
.../gpu_droplets/volume_retrieve_response.py | 0 .../types/gpu_droplets/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../volumes/action_list_params.py | 0 .../volumes/action_list_response.py | 0 .../volumes/action_retrieve_params.py | 0 .../volumes/action_retrieve_response.py | 0 .../volumes/snapshot_create_params.py | 0 .../volumes/snapshot_create_response.py | 0 .../volumes/snapshot_list_params.py | 0 .../volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../gpu_droplets/volumes/volume_action.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model_list_response.py | 0 .../types/model_retrieve_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 .../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 .../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 .../providers/openai_retrieve_response.py | 0 
.../models/providers/openai_update_params.py | 0 .../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../agents/chat/test_completions.py | 4 +- .../agents/evaluation_metrics/test_models.py | 4 +- .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- .../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../gpu_droplets/account/test_keys.py | 4 +- .../gpu_droplets/firewalls/test_droplets.py | 2 +- .../gpu_droplets/firewalls/test_rules.py | 2 +- .../gpu_droplets/firewalls/test_tags.py | 2 +- .../gpu_droplets/floating_ips/test_actions.py | 4 +- .../gpu_droplets/images/test_actions.py | 6 +- .../load_balancers/test_droplets.py | 2 +- .../load_balancers/test_forwarding_rules.py | 2 +- .../gpu_droplets/test_actions.py | 4 +- .../gpu_droplets/test_autoscale.py | 4 +- .../gpu_droplets/test_backups.py | 4 +- .../test_destroy_with_associated_resources.py | 4 +- .../gpu_droplets/test_firewalls.py | 4 +- .../gpu_droplets/test_floating_ips.py | 4 +- .../api_resources/gpu_droplets/test_images.py | 4 +- .../gpu_droplets/test_load_balancers.py | 4 +- .../api_resources/gpu_droplets/test_sizes.py | 4 +- .../gpu_droplets/test_snapshots.py | 4 +- .../gpu_droplets/test_volumes.py | 4 +- .../gpu_droplets/volumes/test_actions.py | 4 +- .../gpu_droplets/volumes/test_snapshots.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../knowledge_bases/test_indexing_jobs.py | 4 +- .../models/providers/test_anthropic.py | 4 +- .../models/providers/test_openai.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_gpu_droplets.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 48 +- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 
6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 506 files changed, 398 insertions(+), 398 deletions(-) rename src/{gradientai => do_gradientai}/__init__.py (95%) rename src/{gradientai => do_gradientai}/_base_client.py (99%) rename src/{gradientai => do_gradientai}/_client.py (100%) rename src/{gradientai => do_gradientai}/_compat.py (100%) rename src/{gradientai => do_gradientai}/_constants.py (100%) rename src/{gradientai => do_gradientai}/_exceptions.py (100%) rename src/{gradientai => do_gradientai}/_files.py (100%) rename src/{gradientai => do_gradientai}/_models.py (100%) rename src/{gradientai => do_gradientai}/_qs.py (100%) rename src/{gradientai => do_gradientai}/_resource.py (100%) rename src/{gradientai => do_gradientai}/_response.py (99%) rename src/{gradientai => do_gradientai}/_streaming.py (100%) rename src/{gradientai => do_gradientai}/_types.py (99%) rename src/{gradientai => do_gradientai}/_utils/__init__.py (100%) rename src/{gradientai => do_gradientai}/_utils/_logs.py (75%) rename src/{gradientai => do_gradientai}/_utils/_proxy.py (100%) rename src/{gradientai => do_gradientai}/_utils/_reflection.py (100%) rename src/{gradientai => do_gradientai}/_utils/_resources_proxy.py (50%) rename src/{gradientai => do_gradientai}/_utils/_streams.py (100%) rename src/{gradientai => do_gradientai}/_utils/_sync.py (100%) rename src/{gradientai => do_gradientai}/_utils/_transform.py (100%) rename src/{gradientai => do_gradientai}/_utils/_typing.py (100%) rename src/{gradientai => do_gradientai}/_utils/_utils.py (100%) rename src/{gradientai => do_gradientai}/_version.py (83%) rename src/{gradientai => do_gradientai}/py.typed (100%) rename src/{gradientai => do_gradientai}/resources/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_datasets.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/models.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_runs.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_test_cases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/functions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/routes.py (100%) rename 
src/{gradientai => do_gradientai}/resources/agents/versions.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/account.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/keys.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/autoscale.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/backups.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/destroy_with_associated_resources.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/firewalls.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/rules.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/tags.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/floating_ips.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/gpu_droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/images.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/forwarding_rules.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/load_balancers.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/sizes.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/volumes.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/inference.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/indexing_jobs.py (100%) rename 
src/{gradientai => do_gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/models/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/models.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/anthropic.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/openai.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/providers.py (100%) rename src/{gradientai => do_gradientai}/resources/regions.py (100%) rename src/{gradientai => do_gradientai}/types/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric_result.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_prompt.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_run.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_test_case.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_regenerate_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_link_knowledge_base_output.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_star_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_star_metric_param.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/agents/evaluation_dataset_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/function_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/knowledge_base_detach_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_add_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/route_view_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/agents/version_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agent_model.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_agreement.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_anthropic_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_deployment_visibility.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_knowledge_base.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_model.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_model_version.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_openai_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_retrieval_method.py (100%)
 rename src/{gradientai => do_gradientai}/types/api_workspace.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/completion_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/chat/completion_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/droplet_backup_policy.py (100%)
 rename src/{gradientai => do_gradientai}/types/droplet_backup_policy_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_delete_by_tag_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_neighbors_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplet_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/associated_resource.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_static_config.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_static_config_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_policies_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_policies_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_supported_policies_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_retrieve_policy_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/current_utilization.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroyed_associated_resource.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_remove_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_remove_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_remove_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_remove_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_delete_by_name_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/volume_action.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_regenerate_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_key_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/inference/api_model_api_key_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_base_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexing_job.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/aws_data_source_param.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/model_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/model_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_delete_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/region_list_params.py (100%)
 rename src/{gradientai => do_gradientai}/types/region_list_response.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/action.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/action_link.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/api_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/api_meta.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/backward_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/chat_completion_chunk.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/chat_completion_token_logprob.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/completion_usage.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/disk_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/droplet.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/droplet_next_backup_window.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/firewall_rule_target.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/forward_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/garbage_collection.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/gpu_info.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/image.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/kernel.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/meta_properties.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/network_v4.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/network_v6.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/page_links.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/region.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/size.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/snapshots.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/subscription.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/subscription_tier_base.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared/vpc_peering.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared_params/__init__.py (100%)
 rename src/{gradientai => do_gradientai}/types/shared_params/firewall_rule_target.py (100%)

diff --git a/.stats.yml b/.stats.yml
index d7c07274..718d3432 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 168
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml
openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d
-config_hash: 732232c90ba4600bc44b6a96e14beb96
+config_hash: 5cf9c7359c13307780aa25d0203b0b35
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 086907ef..4f59c83a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock
Most of the SDK is generated code. Modifications to code will be persisted between
generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.
## Adding and running examples
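Taken together, the rename block and the `CONTRIBUTING.md` hunk above amount to a single mechanical change for downstream code: the top-level package moves from `gradientai` to `do_gradientai`, while every module path underneath it stays the same. A minimal before/after sketch of what that means for type imports (assuming the package is installed under its new name; `APIAgent` is one of the types listed in the api.md diff further below):

```python
# Before this patch the type lived under the old package name:
#   from gradientai.types import APIAgent

# After this patch only the top-level package name changes;
# the module path below it is identical.
from do_gradientai.types import APIAgent

print(APIAgent)
```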
diff --git a/README.md b/README.md
index a009a7cb..df2a09de 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ The full API of this library can be found in [api.md](api.md).
```python
import os
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI(
    api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -56,7 +56,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
```python
import os
import asyncio
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI
client = AsyncGradientAI(
    api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
@@ -96,8 +96,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient`:
```python
import asyncio
-from gradientai import DefaultAioHttpClient
-from gradientai import AsyncGradientAI
+from do_gradientai import DefaultAioHttpClient
+from do_gradientai import AsyncGradientAI
async def main() -> None:
@@ -125,7 +125,7 @@ asyncio.run(main())
We provide support for streaming responses using Server-Sent Events (SSE).
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI()
@@ -146,7 +146,7 @@ for completion in stream:
The async client uses the exact same interface.
```python
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI
client = AsyncGradientAI()
@@ -178,7 +178,7 @@ Typed requests and responses provide autocomplete and documentation within your
Nested parameters are dictionaries, typed using `TypedDict`, for example:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI()
@@ -197,16 +197,16 @@ print(completion.stream_options)
## Handling errors
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
-All errors inherit from `gradientai.APIError`.
+All errors inherit from `do_gradientai.APIError`.
```python
-import gradientai
-from gradientai import GradientAI
+import do_gradientai
+from do_gradientai import GradientAI
client = GradientAI()
@@ -220,12 +220,12 @@ try:
    ],
    model="llama3.3-70b-instruct",
)
-except gradientai.APIConnectionError as e:
+except do_gradientai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except gradientai.RateLimitError as e:
+except do_gradientai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
-except gradientai.APIStatusError as e:
+except do_gradientai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
@@ -253,7 +253,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
# Configure the default for all requests:
client = GradientAI(
@@ -279,7 +279,7 @@ By default requests time out after 1 minute. You can configure this with a `time
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
# Configure the default for all requests:
client = GradientAI(
@@ -339,7 +339,7 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI()
response = client.chat.completions.with_raw_response.create(
@@ -355,9 +355,9 @@ completion = response.parse()  # get the object that `chat.completions.create()`
print(completion.choices)
```
-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
#### `.with_streaming_response`
@@ -427,7 +427,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
```python
import httpx
-from gradientai import GradientAI, DefaultHttpxClient
+from do_gradientai import GradientAI, DefaultHttpxClient
client = GradientAI(
    # Or use the `GRADIENT_AI_BASE_URL` env var
@@ -450,7 +450,7 @@ client.with_options(http_client=DefaultHttpxClient(...))
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
with GradientAI() as client:
    # make requests here
@@ -478,8 +478,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
You can determine the version that is being used at runtime with:
```py
-import gradientai
-print(gradientai.__version__)
+import do_gradientai
+print(do_gradientai.__version__)
```
## Requirements
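Every README hunk above reduces to the same one-line migration in user code. A condensed sketch of the README's quickstart under the new package name; `GRADIENTAI_API_KEY` must be set in the environment, and the message payload here is illustrative rather than taken from the patch:

```python
import os

from do_gradientai import GradientAI  # formerly: from gradientai import GradientAI

# Per the README, the api_key argument is the default and can be omitted.
client = GradientAI(
    api_key=os.environ.get("GRADIENTAI_API_KEY"),
)

# Model name and the printed attribute follow the README examples above;
# the exact message content is an illustrative assumption.
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say hello."}],
    model="llama3.3-70b-instruct",
)
print(completion.choices)
```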
diff --git a/api.md b/api.md
index 0f4770e9..5d6e5491 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
# Shared Types
```python
-from gradientai.types import (
+from do_gradientai.types import (
    Action,
    ActionLink,
    APILinks,
@@ -37,7 +37,7 @@ from gradientai.types import (
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
    APIAgent,
    APIAgentAPIKeyInfo,
    APIAgentModel,
@@ -57,19 +57,19 @@ from gradientai.types import (
Methods:
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
## APIKeys
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    APIKeyCreateResponse,
    APIKeyUpdateResponse,
    APIKeyListResponse,
@@ -80,11 +80,11 @@ from gradientai.types.agents import (
Methods:
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
## Chat
@@ -93,19 +93,19 @@ Methods:
Types:
```python
-from gradientai.types.agents.chat import CompletionCreateResponse
+from do_gradientai.types.agents.chat import CompletionCreateResponse
```
Methods:
-- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
## EvaluationMetrics
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    EvaluationMetricListResponse,
    EvaluationMetricListRegionsResponse,
)
@@ -113,15 +113,15 @@ from gradientai.types.agents import (
Methods:
-- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
-- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
### Workspaces
Types:
```python
-from gradientai.types.agents.evaluation_metrics import (
+from do_gradientai.types.agents.evaluation_metrics import (
    WorkspaceCreateResponse,
    WorkspaceRetrieveResponse,
    WorkspaceUpdateResponse,
@@ -133,19 +133,19 @@ from gradientai.types.agents.evaluation_metrics import (
Methods:
-- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
-- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
-- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
-- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
-- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
-- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
#### Agents
Types:
```python
-from gradientai.types.agents.evaluation_metrics.workspaces import (
+from do_gradientai.types.agents.evaluation_metrics.workspaces import (
    AgentListResponse,
    AgentMoveResponse,
)
@@ -153,27 +153,27 @@ from gradientai.types.agents.evaluation_metrics.workspaces import (
Methods:
-- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
-- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
### Models
Types:
```python
-from gradientai.types.agents.evaluation_metrics import ModelListResponse
+from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
```
Methods:
-- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
+- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
## EvaluationRuns
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    APIEvaluationMetric,
    APIEvaluationMetricResult,
    APIEvaluationPrompt,
@@ -187,17 +187,17 @@ from gradientai.types.agents import (
Methods:
-- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
-- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
-- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
## EvaluationTestCases
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    APIEvaluationTestCase,
    APIStarMetric,
    EvaluationTestCaseCreateResponse,
@@ -210,18 +210,18 @@ from gradientai.types.agents import (
Methods:
-- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
-- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
## EvaluationDatasets
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    EvaluationDatasetCreateResponse,
    EvaluationDatasetCreateFileUploadPresignedURLsResponse,
)
@@ -229,15 +229,15 @@ from gradientai.types.agents import (
Methods:
-- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
## Functions
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    FunctionCreateResponse,
    FunctionUpdateResponse,
    FunctionDeleteResponse,
)
@@ -246,43 +246,43 @@ Methods:
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
## Versions
Types:
```python
-from gradientai.types.agents import VersionUpdateResponse, VersionListResponse
+from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse
```
Methods:
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
## KnowledgeBases
Types:
```python
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
```
Methods:
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
## Routes
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
    RouteUpdateResponse,
    RouteDeleteResponse,
    RouteAddResponse,
@@ -292,10 +292,10 @@ from gradientai.types.agents import (
Methods:
-- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
-- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
-- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
-- client.agents.routes.view(uuid) -> RouteViewResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse
# Chat
@@ -304,19 +304,19 @@ Methods:
Types:
```python
-from gradientai.types.chat import CompletionCreateResponse
+from do_gradientai.types.chat import CompletionCreateResponse
```
Methods:
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
# GPUDroplets
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
    DropletBackupPolicy,
    GPUDropletCreateResponse,
    GPUDropletRetrieveResponse,
@@ -330,22 +330,22 @@ from gradientai.types import (
Methods:
-- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
-- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
-- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
-- client.gpu_droplets.delete(droplet_id) -> None
-- client.gpu_droplets.delete_by_tag(\*\*params) -> None
-- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
-- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
-- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
-- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
+- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
+- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
+- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
+- client.gpu_droplets.delete(droplet_id) -> None
+- client.gpu_droplets.delete_by_tag(\*\*params) -> None
+- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
+- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
+- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
+- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
## Backups
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    BackupListResponse,
    BackupListPoliciesResponse,
    BackupListSupportedPoliciesResponse,
@@ -355,17 +355,17 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
-- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
-- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
-- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
+- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
+- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
+- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
+- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
## Actions
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    ActionRetrieveResponse,
    ActionListResponse,
    ActionBulkInitiateResponse,
@@ -375,17 +375,17 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
-- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
-- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
-- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
+- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
+- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
+- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
## DestroyWithAssociatedResources
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    AssociatedResource,
    DestroyedAssociatedResource,
    DestroyWithAssociatedResourceListResponse,
@@ -395,18 +395,18 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
-- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
-- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
-- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
-- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
+- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
+- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
+- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
## Autoscale
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    AutoscalePool,
    AutoscalePoolDropletTemplate,
    AutoscalePoolDynamicConfig,
@@ -423,21 +423,21 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
-- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
-- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
-- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
-- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
-- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
+- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
+- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
+- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
+- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
+- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
+- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
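Before the diff continues with the firewall sections, a rough usage sketch for the GPU droplet methods listed above. The api.md hunks only show signatures, so whether `list()` can be called without filter params, the placeholder `droplet_id`, and the printed response shape are assumptions, not content of this patch:

```python
from do_gradientai import GradientAI

client = GradientAI()  # picks up GRADIENTAI_API_KEY from the environment

# api.md above: client.gpu_droplets.list(**params) -> GPUDropletListResponse.
# Assumes no filter params are required.
droplets = client.gpu_droplets.list()
print(droplets)

# api.md above: client.gpu_droplets.actions.list(droplet_id, **params).
# 123 is a placeholder; a real id would come from the listing response.
actions = client.gpu_droplets.actions.list(droplet_id=123)
print(actions)
```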
## Firewalls
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    Firewall,
    FirewallCreateResponse,
    FirewallRetrieveResponse,
@@ -448,39 +448,39 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
-- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
-- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
-- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
-- client.gpu_droplets.firewalls.delete(firewall_id) -> None
+- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
+- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
+- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
+- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
+- client.gpu_droplets.firewalls.delete(firewall_id) -> None
### Droplets
Methods:
-- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
### Tags
Methods:
-- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
### Rules
Methods:
-- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
## FloatingIPs
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    FloatingIP,
    FloatingIPCreateResponse,
    FloatingIPRetrieveResponse,
@@ -490,17 +490,17 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
-- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
-- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
-- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
+- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
+- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
+- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
+- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
### Actions
Types:
```python
-from gradientai.types.gpu_droplets.floating_ips import (
+from do_gradientai.types.gpu_droplets.floating_ips import (
    ActionCreateResponse,
    ActionRetrieveResponse,
    ActionListResponse,
)
@@ -509,16 +509,16 @@ from gradientai.types.gpu_droplets.floating_ips import (
Methods:
-- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
-- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
-- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
+- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
+- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
+- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
## Images
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    ImageCreateResponse,
    ImageRetrieveResponse,
    ImageUpdateResponse,
@@ -528,32 +528,32 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
-- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
-- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
-- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
-- client.gpu_droplets.images.delete(image_id) -> None
+- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
+- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
+- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
+- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
+- client.gpu_droplets.images.delete(image_id) -> None
### Actions
Types:
```python
-from gradientai.types.gpu_droplets.images import ActionListResponse
+from do_gradientai.types.gpu_droplets.images import ActionListResponse
```
Methods:
-- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
-- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
-- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
+- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
+- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
+- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
## LoadBalancers
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    Domains,
    ForwardingRule,
    GlbSettings,
@@ -570,59 +570,59 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
-- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
-- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
-- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
-- client.gpu_droplets.load_balancers.delete(lb_id) -> None
-- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
+- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
+- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
+- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
+- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
+- client.gpu_droplets.load_balancers.delete(lb_id) -> None
+- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
### Droplets
Methods:
-- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
### ForwardingRules
Methods:
-- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
## Sizes
Types:
```python
-from gradientai.types.gpu_droplets import SizeListResponse
+from do_gradientai.types.gpu_droplets import SizeListResponse
```
Methods:
-- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse
+- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse
## Snapshots
Types:
```python
-from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse
+from do_gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse
```
Methods:
-- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
-- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse
-- client.gpu_droplets.snapshots.delete(snapshot_id) -> None
+- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse
+- client.gpu_droplets.snapshots.delete(snapshot_id) -> None
## Volumes
Types:
```python
-from gradientai.types.gpu_droplets import (
+from do_gradientai.types.gpu_droplets import (
    VolumeCreateResponse,
    VolumeRetrieveResponse,
    VolumeListResponse,
@@ -631,18 +631,18 @@ from gradientai.types.gpu_droplets import (
Methods:
-- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse
-- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse
-- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse
-- client.gpu_droplets.volumes.delete(volume_id) -> None
-- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None
+- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse
+- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse
+- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse
+- client.gpu_droplets.volumes.delete(volume_id) -> None
+- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None
### Actions
Types:
```python
-from gradientai.types.gpu_droplets.volumes import (
+from do_gradientai.types.gpu_droplets.volumes import (
    VolumeAction,
    ActionRetrieveResponse,
    ActionListResponse,
@@ -653,17 +653,17 @@ from gradientai.types.gpu_droplets.volumes import (
Methods:
-- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse
-- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse
-- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse
-- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse
+- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse
+- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse
+- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse
### Snapshots
Types:
```python
-from gradientai.types.gpu_droplets.volumes import (
+from do_gradientai.types.gpu_droplets.volumes import (
    SnapshotCreateResponse,
    SnapshotRetrieveResponse,
    SnapshotListResponse,
)
@@ -672,10 +672,10 @@ from gradientai.types.gpu_droplets.volumes import (
Methods:
-- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse
-- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
-- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse
-- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None
+- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse
+- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse
+- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None
## Account
@@ -684,7 +684,7 @@ Methods:
Types:
```python
-from gradientai.types.gpu_droplets.account import (
+from do_gradientai.types.gpu_droplets.account import (
    KeyCreateResponse,
    KeyRetrieveResponse,
    KeyUpdateResponse,
@@ -694,11 +694,11 @@ from gradientai.types.gpu_droplets.account import (
Methods:
-- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse
-- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse
-- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse
-- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse
-- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None
+- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse
+- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse
+- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse
+- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse
+- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None
# Inference
@@ -707,7 +707,7 @@ Methods:
Types:
```python
-from gradientai.types.inference import (
+from do_gradientai.types.inference import (
    APIModelAPIKeyInfo,
    APIKeyCreateResponse,
    APIKeyUpdateResponse,
@@ -719,18 +719,18 @@ from gradientai.types.inference import (
Methods:
-- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
-- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
-- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
-- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
+- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
+- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
+- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
+- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
# KnowledgeBases
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
    APIKnowledgeBase,
    KnowledgeBaseCreateResponse,
    KnowledgeBaseRetrieveResponse,
@@ -742,18 +742,18 @@ from gradientai.types import (
Methods:
-- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
-- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
-- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
-- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
-- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
+- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
+- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
+- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
+- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
## DataSources
Types:
```python
-from gradientai.types.knowledge_bases import (
+from do_gradientai.types.knowledge_bases import (
    APIFileUploadDataSource,
    APIKnowledgeBaseDataSource,
    APISpacesDataSource,
@@ -767,16 +767,16 @@ from gradientai.types.knowledge_bases import (
Methods:
-- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
-- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
-- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
+- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
+- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
## IndexingJobs
Types:
```python
-from gradientai.types.knowledge_bases import (
+from do_gradientai.types.knowledge_bases import (
    APIIndexedDataSource,
    APIIndexingJob,
    IndexingJobCreateResponse,
@@ -789,18 +789,18 @@ from gradientai.types.knowledge_bases import (
Methods:
-- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
-- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
-- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
-- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
-- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
# Models
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
    APIAgreement,
    APIModel,
    APIModelVersion,
@@ -811,8 +811,8 @@ from gradientai.types import (
Methods:
-- client.models.retrieve(model) -> ModelRetrieveResponse
-- client.models.list() -> ModelListResponse
+- client.models.retrieve(model) -> ModelRetrieveResponse
+- client.models.list() -> ModelListResponse
## Providers
@@ -821,7 +821,7 @@ Methods:
Types:
```python
-from gradientai.types.models.providers import (
+from do_gradientai.types.models.providers import (
    AnthropicCreateResponse,
    AnthropicRetrieveResponse,
    AnthropicUpdateResponse,
@@ -833,19 +833,19 @@ from gradientai.types.models.providers import (
Methods:
-- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse
-- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse
-- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse
-- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse
-- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse
-- 
client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from gradientai.types.models.providers import ( +from do_gradientai.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -857,21 +857,21 @@ from gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # Regions Types: ```python -from gradientai.types import RegionListResponse +from do_gradientai.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse diff --git a/mypy.ini b/mypy.ini index 748d8234..82b0c891 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index 066f5a40..a28ca97c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import gradientai'" +"check:importable" = "python -c 'import do_gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/gradientai"] +packages = ["src/do_gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["gradientai", "tests"] +known-first-party = ["do_gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index 2ff9a58c..a320c1a8 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/gradientai/_version.py" + "src/do_gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index 37b38f6f..e46e909b 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import gradientai' +rye run python -c 'import do_gradientai' diff --git a/src/gradientai/__init__.py b/src/do_gradientai/__init__.py similarity index 95% rename from src/gradientai/__init__.py rename to src/do_gradientai/__init__.py index 3316fe47..41b943b2 100644 --- a/src/gradientai/__init__.py +++ b/src/do_gradientai/__init__.py @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError +# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "gradientai" + __locals[__name].__module__ = "do_gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/gradientai/_base_client.py b/src/do_gradientai/_base_client.py similarity index 99% rename from src/gradientai/_base_client.py rename to src/do_gradientai/_base_client.py index 379c27d1..326c662c 100644 --- a/src/gradientai/_base_client.py +++ b/src/do_gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/gradientai/_client.py b/src/do_gradientai/_client.py similarity index 100% rename from src/gradientai/_client.py rename to src/do_gradientai/_client.py diff --git a/src/gradientai/_compat.py b/src/do_gradientai/_compat.py similarity index 100% rename from src/gradientai/_compat.py rename to src/do_gradientai/_compat.py diff --git a/src/gradientai/_constants.py b/src/do_gradientai/_constants.py similarity index 100% rename from src/gradientai/_constants.py rename to src/do_gradientai/_constants.py diff --git a/src/gradientai/_exceptions.py b/src/do_gradientai/_exceptions.py similarity index 100% rename from src/gradientai/_exceptions.py rename to src/do_gradientai/_exceptions.py diff --git a/src/gradientai/_files.py b/src/do_gradientai/_files.py similarity index 100% rename from src/gradientai/_files.py rename to src/do_gradientai/_files.py diff --git a/src/gradientai/_models.py b/src/do_gradientai/_models.py similarity index 100% rename from src/gradientai/_models.py rename to src/do_gradientai/_models.py diff --git a/src/gradientai/_qs.py b/src/do_gradientai/_qs.py similarity index 100% rename from src/gradientai/_qs.py rename to src/do_gradientai/_qs.py diff --git a/src/gradientai/_resource.py b/src/do_gradientai/_resource.py similarity index 100% rename from src/gradientai/_resource.py rename to src/do_gradientai/_resource.py diff --git a/src/gradientai/_response.py b/src/do_gradientai/_response.py similarity index 99% rename from src/gradientai/_response.py rename to src/do_gradientai/_response.py index 2037e4ca..8ca43971 100644 --- a/src/gradientai/_response.py +++ b/src/do_gradientai/_response.py @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -558,7 +558,7 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `do_gradientai._streaming` for reference", ) diff --git a/src/gradientai/_streaming.py b/src/do_gradientai/_streaming.py similarity index 100% rename from src/gradientai/_streaming.py rename to src/do_gradientai/_streaming.py diff --git a/src/gradientai/_types.py b/src/do_gradientai/_types.py similarity index 99% rename from src/gradientai/_types.py rename to src/do_gradientai/_types.py index 1bac876d..c356c700 100644 --- a/src/gradientai/_types.py +++ b/src/do_gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from gradientai import NoneType +# from do_gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/gradientai/_utils/__init__.py b/src/do_gradientai/_utils/__init__.py similarity index 100% rename from src/gradientai/_utils/__init__.py rename to src/do_gradientai/_utils/__init__.py diff --git a/src/gradientai/_utils/_logs.py b/src/do_gradientai/_utils/_logs.py similarity index 75% rename from src/gradientai/_utils/_logs.py rename to src/do_gradientai/_utils/_logs.py index 9047e5c8..ac45b1a5 100644 --- a/src/gradientai/_utils/_logs.py +++ b/src/do_gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("gradientai") +logger: logging.Logger = logging.getLogger("do_gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", diff --git a/src/gradientai/_utils/_proxy.py b/src/do_gradientai/_utils/_proxy.py similarity index 100% rename from src/gradientai/_utils/_proxy.py rename to src/do_gradientai/_utils/_proxy.py diff --git a/src/gradientai/_utils/_reflection.py b/src/do_gradientai/_utils/_reflection.py similarity index 100% rename from src/gradientai/_utils/_reflection.py rename to src/do_gradientai/_utils/_reflection.py diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/do_gradientai/_utils/_resources_proxy.py similarity index 50% rename from src/gradientai/_utils/_resources_proxy.py rename to src/do_gradientai/_utils/_resources_proxy.py index b3bc4931..03763c3b 100644 --- a/src/gradientai/_utils/_resources_proxy.py +++ b/src/do_gradientai/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `gradientai.resources` module. + """A proxy for the `do_gradientai.resources` module. 
- This is used so that we can lazily import `gradientai.resources` only when - needed *and* so that users can just import `gradientai` and reference `gradientai.resources` + This is used so that we can lazily import `do_gradientai.resources` only when + needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("gradientai.resources") + mod = importlib.import_module("do_gradientai.resources") return mod diff --git a/src/gradientai/_utils/_streams.py b/src/do_gradientai/_utils/_streams.py similarity index 100% rename from src/gradientai/_utils/_streams.py rename to src/do_gradientai/_utils/_streams.py diff --git a/src/gradientai/_utils/_sync.py b/src/do_gradientai/_utils/_sync.py similarity index 100% rename from src/gradientai/_utils/_sync.py rename to src/do_gradientai/_utils/_sync.py diff --git a/src/gradientai/_utils/_transform.py b/src/do_gradientai/_utils/_transform.py similarity index 100% rename from src/gradientai/_utils/_transform.py rename to src/do_gradientai/_utils/_transform.py diff --git a/src/gradientai/_utils/_typing.py b/src/do_gradientai/_utils/_typing.py similarity index 100% rename from src/gradientai/_utils/_typing.py rename to src/do_gradientai/_utils/_typing.py diff --git a/src/gradientai/_utils/_utils.py b/src/do_gradientai/_utils/_utils.py similarity index 100% rename from src/gradientai/_utils/_utils.py rename to src/do_gradientai/_utils/_utils.py diff --git a/src/gradientai/_version.py b/src/do_gradientai/_version.py similarity index 83% rename from src/gradientai/_version.py rename to src/do_gradientai/_version.py index 9ba60879..e13138f7 100644 --- a/src/gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-__title__ = "gradientai" +__title__ = "do_gradientai" __version__ = "0.1.0-alpha.19" # x-release-please-version diff --git a/src/gradientai/py.typed b/src/do_gradientai/py.typed similarity index 100% rename from src/gradientai/py.typed rename to src/do_gradientai/py.typed diff --git a/src/gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py similarity index 100% rename from src/gradientai/resources/__init__.py rename to src/do_gradientai/resources/__init__.py diff --git a/src/gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py similarity index 100% rename from src/gradientai/resources/agents/__init__.py rename to src/do_gradientai/resources/agents/__init__.py diff --git a/src/gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py similarity index 100% rename from src/gradientai/resources/agents/agents.py rename to src/do_gradientai/resources/agents/agents.py diff --git a/src/gradientai/resources/agents/api_keys.py b/src/do_gradientai/resources/agents/api_keys.py similarity index 100% rename from src/gradientai/resources/agents/api_keys.py rename to src/do_gradientai/resources/agents/api_keys.py diff --git a/src/gradientai/resources/agents/chat/__init__.py b/src/do_gradientai/resources/agents/chat/__init__.py similarity index 100% rename from src/gradientai/resources/agents/chat/__init__.py rename to src/do_gradientai/resources/agents/chat/__init__.py diff --git a/src/gradientai/resources/agents/chat/chat.py b/src/do_gradientai/resources/agents/chat/chat.py similarity index 100% rename from src/gradientai/resources/agents/chat/chat.py rename to src/do_gradientai/resources/agents/chat/chat.py diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/do_gradientai/resources/agents/chat/completions.py similarity index 100% rename from src/gradientai/resources/agents/chat/completions.py rename to src/do_gradientai/resources/agents/chat/completions.py diff --git a/src/gradientai/resources/agents/evaluation_datasets.py b/src/do_gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_datasets.py rename to src/do_gradientai/resources/agents/evaluation_datasets.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/do_gradientai/resources/agents/evaluation_metrics/models.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/models.py rename to src/do_gradientai/resources/agents/evaluation_metrics/models.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to 
src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff --git a/src/gradientai/resources/agents/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_runs.py rename to src/do_gradientai/resources/agents/evaluation_runs.py diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_test_cases.py rename to src/do_gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/gradientai/resources/agents/functions.py b/src/do_gradientai/resources/agents/functions.py similarity index 100% rename from src/gradientai/resources/agents/functions.py rename to src/do_gradientai/resources/agents/functions.py diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/do_gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/agents/knowledge_bases.py rename to src/do_gradientai/resources/agents/knowledge_bases.py diff --git a/src/gradientai/resources/agents/routes.py b/src/do_gradientai/resources/agents/routes.py similarity index 100% rename from src/gradientai/resources/agents/routes.py rename to src/do_gradientai/resources/agents/routes.py diff --git a/src/gradientai/resources/agents/versions.py b/src/do_gradientai/resources/agents/versions.py similarity index 100% rename from src/gradientai/resources/agents/versions.py rename to src/do_gradientai/resources/agents/versions.py diff --git a/src/gradientai/resources/chat/__init__.py b/src/do_gradientai/resources/chat/__init__.py similarity index 100% rename from src/gradientai/resources/chat/__init__.py rename to src/do_gradientai/resources/chat/__init__.py diff --git a/src/gradientai/resources/chat/chat.py b/src/do_gradientai/resources/chat/chat.py similarity index 100% rename from src/gradientai/resources/chat/chat.py rename to src/do_gradientai/resources/chat/chat.py diff --git a/src/gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py similarity index 100% rename from src/gradientai/resources/chat/completions.py rename to src/do_gradientai/resources/chat/completions.py diff --git a/src/gradientai/resources/gpu_droplets/__init__.py b/src/do_gradientai/resources/gpu_droplets/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/__init__.py rename to src/do_gradientai/resources/gpu_droplets/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/__init__.py b/src/do_gradientai/resources/gpu_droplets/account/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/__init__.py rename to 
src/do_gradientai/resources/gpu_droplets/account/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/account.py b/src/do_gradientai/resources/gpu_droplets/account/account.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/account.py rename to src/do_gradientai/resources/gpu_droplets/account/account.py diff --git a/src/gradientai/resources/gpu_droplets/account/keys.py b/src/do_gradientai/resources/gpu_droplets/account/keys.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/keys.py rename to src/do_gradientai/resources/gpu_droplets/account/keys.py diff --git a/src/gradientai/resources/gpu_droplets/actions.py b/src/do_gradientai/resources/gpu_droplets/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/actions.py rename to src/do_gradientai/resources/gpu_droplets/actions.py diff --git a/src/gradientai/resources/gpu_droplets/autoscale.py b/src/do_gradientai/resources/gpu_droplets/autoscale.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/autoscale.py rename to src/do_gradientai/resources/gpu_droplets/autoscale.py diff --git a/src/gradientai/resources/gpu_droplets/backups.py b/src/do_gradientai/resources/gpu_droplets/backups.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/backups.py rename to src/do_gradientai/resources/gpu_droplets/backups.py diff --git a/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py rename to src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/__init__.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/droplets.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/firewalls.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/rules.py b/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/rules.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/rules.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/tags.py b/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/tags.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/tags.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/__init__.py rename to 
src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/actions.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py diff --git a/src/gradientai/resources/gpu_droplets/gpu_droplets.py b/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/gpu_droplets.py rename to src/do_gradientai/resources/gpu_droplets/gpu_droplets.py diff --git a/src/gradientai/resources/gpu_droplets/images/__init__.py b/src/do_gradientai/resources/gpu_droplets/images/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/__init__.py rename to src/do_gradientai/resources/gpu_droplets/images/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/images/actions.py b/src/do_gradientai/resources/gpu_droplets/images/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/actions.py rename to src/do_gradientai/resources/gpu_droplets/images/actions.py diff --git a/src/gradientai/resources/gpu_droplets/images/images.py b/src/do_gradientai/resources/gpu_droplets/images/images.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/images.py rename to src/do_gradientai/resources/gpu_droplets/images/images.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/__init__.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/droplets.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py diff --git a/src/gradientai/resources/gpu_droplets/sizes.py b/src/do_gradientai/resources/gpu_droplets/sizes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/sizes.py rename to src/do_gradientai/resources/gpu_droplets/sizes.py diff --git a/src/gradientai/resources/gpu_droplets/snapshots.py 
b/src/do_gradientai/resources/gpu_droplets/snapshots.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/__init__.py b/src/do_gradientai/resources/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/__init__.py rename to src/do_gradientai/resources/gpu_droplets/volumes/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/actions.py b/src/do_gradientai/resources/gpu_droplets/volumes/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/actions.py rename to src/do_gradientai/resources/gpu_droplets/volumes/actions.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/volumes.py b/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/volumes.py rename to src/do_gradientai/resources/gpu_droplets/volumes/volumes.py diff --git a/src/gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py similarity index 100% rename from src/gradientai/resources/inference/__init__.py rename to src/do_gradientai/resources/inference/__init__.py diff --git a/src/gradientai/resources/inference/api_keys.py b/src/do_gradientai/resources/inference/api_keys.py similarity index 100% rename from src/gradientai/resources/inference/api_keys.py rename to src/do_gradientai/resources/inference/api_keys.py diff --git a/src/gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py similarity index 100% rename from src/gradientai/resources/inference/inference.py rename to src/do_gradientai/resources/inference/inference.py diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/__init__.py rename to src/do_gradientai/resources/knowledge_bases/__init__.py diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/do_gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/data_sources.py rename to src/do_gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/do_gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/do_gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py similarity index 100% rename from src/gradientai/resources/models/__init__.py rename to 
src/do_gradientai/resources/models/__init__.py diff --git a/src/gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py similarity index 100% rename from src/gradientai/resources/models/models.py rename to src/do_gradientai/resources/models/models.py diff --git a/src/gradientai/resources/models/providers/__init__.py b/src/do_gradientai/resources/models/providers/__init__.py similarity index 100% rename from src/gradientai/resources/models/providers/__init__.py rename to src/do_gradientai/resources/models/providers/__init__.py diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/do_gradientai/resources/models/providers/anthropic.py similarity index 100% rename from src/gradientai/resources/models/providers/anthropic.py rename to src/do_gradientai/resources/models/providers/anthropic.py diff --git a/src/gradientai/resources/models/providers/openai.py b/src/do_gradientai/resources/models/providers/openai.py similarity index 100% rename from src/gradientai/resources/models/providers/openai.py rename to src/do_gradientai/resources/models/providers/openai.py diff --git a/src/gradientai/resources/models/providers/providers.py b/src/do_gradientai/resources/models/providers/providers.py similarity index 100% rename from src/gradientai/resources/models/providers/providers.py rename to src/do_gradientai/resources/models/providers/providers.py diff --git a/src/gradientai/resources/regions.py b/src/do_gradientai/resources/regions.py similarity index 100% rename from src/gradientai/resources/regions.py rename to src/do_gradientai/resources/regions.py diff --git a/src/gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py similarity index 100% rename from src/gradientai/types/__init__.py rename to src/do_gradientai/types/__init__.py diff --git a/src/gradientai/types/agent_create_params.py b/src/do_gradientai/types/agent_create_params.py similarity index 100% rename from src/gradientai/types/agent_create_params.py rename to src/do_gradientai/types/agent_create_params.py diff --git a/src/gradientai/types/agent_create_response.py b/src/do_gradientai/types/agent_create_response.py similarity index 100% rename from src/gradientai/types/agent_create_response.py rename to src/do_gradientai/types/agent_create_response.py diff --git a/src/gradientai/types/agent_delete_response.py b/src/do_gradientai/types/agent_delete_response.py similarity index 100% rename from src/gradientai/types/agent_delete_response.py rename to src/do_gradientai/types/agent_delete_response.py diff --git a/src/gradientai/types/agent_list_params.py b/src/do_gradientai/types/agent_list_params.py similarity index 100% rename from src/gradientai/types/agent_list_params.py rename to src/do_gradientai/types/agent_list_params.py diff --git a/src/gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py similarity index 100% rename from src/gradientai/types/agent_list_response.py rename to src/do_gradientai/types/agent_list_response.py diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/do_gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/gradientai/types/agent_retrieve_response.py rename to src/do_gradientai/types/agent_retrieve_response.py diff --git a/src/gradientai/types/agent_update_params.py b/src/do_gradientai/types/agent_update_params.py similarity index 100% rename from src/gradientai/types/agent_update_params.py rename to src/do_gradientai/types/agent_update_params.py diff --git 
a/src/gradientai/types/agent_update_response.py b/src/do_gradientai/types/agent_update_response.py similarity index 100% rename from src/gradientai/types/agent_update_response.py rename to src/do_gradientai/types/agent_update_response.py diff --git a/src/gradientai/types/agent_update_status_params.py b/src/do_gradientai/types/agent_update_status_params.py similarity index 100% rename from src/gradientai/types/agent_update_status_params.py rename to src/do_gradientai/types/agent_update_status_params.py diff --git a/src/gradientai/types/agent_update_status_response.py b/src/do_gradientai/types/agent_update_status_response.py similarity index 100% rename from src/gradientai/types/agent_update_status_response.py rename to src/do_gradientai/types/agent_update_status_response.py diff --git a/src/gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to src/do_gradientai/types/agents/__init__.py diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/do_gradientai/types/agents/api_evaluation_metric.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric.py rename to src/do_gradientai/types/agents/api_evaluation_metric.py diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/do_gradientai/types/agents/api_evaluation_metric_result.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric_result.py rename to src/do_gradientai/types/agents/api_evaluation_metric_result.py diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/do_gradientai/types/agents/api_evaluation_prompt.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_prompt.py rename to src/do_gradientai/types/agents/api_evaluation_prompt.py diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_run.py rename to src/do_gradientai/types/agents/api_evaluation_run.py diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/do_gradientai/types/agents/api_evaluation_test_case.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_test_case.py rename to src/do_gradientai/types/agents/api_evaluation_test_case.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/do_gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/do_gradientai/types/agents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/do_gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to src/do_gradientai/types/agents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/do_gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/do_gradientai/types/agents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/do_gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/do_gradientai/types/agents/api_key_list_params.py diff --git 
a/src/gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/do_gradientai/types/agents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/do_gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/do_gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/do_gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_params.py rename to src/do_gradientai/types/agents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/do_gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/do_gradientai/types/agents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/do_gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to src/do_gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/do_gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric.py rename to src/do_gradientai/types/agents/api_star_metric.py diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/do_gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric_param.py rename to src/do_gradientai/types/agents/api_star_metric_param.py diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/do_gradientai/types/agents/chat/__init__.py similarity index 100% rename from src/gradientai/types/agents/chat/__init__.py rename to src/do_gradientai/types/agents/chat/__init__.py diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/do_gradientai/types/agents/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_params.py rename to src/do_gradientai/types/agents/chat/completion_create_params.py diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/do_gradientai/types/agents/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_response.py rename to src/do_gradientai/types/agents/chat/completion_create_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from 
src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_params.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/types/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py rename to 
src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_create_params.py
rename to src/do_gradientai/types/agents/evaluation_run_create_params.py
diff --git a/src/gradientai/types/agents/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_create_response.py
rename to src/do_gradientai/types/agents/evaluation_run_create_response.py
diff --git a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/do_gradientai/types/agents/evaluation_run_list_results_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_list_results_params.py
rename to src/do_gradientai/types/agents/evaluation_run_list_results_params.py
diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_list_results_response.py
rename to src/do_gradientai/types/agents/evaluation_run_list_results_response.py
diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_run_retrieve_results_response.py
rename to src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_create_params.py b/src/do_gradientai/types/agents/evaluation_test_case_create_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_create_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_create_params.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_create_response.py b/src/do_gradientai/types/agents/evaluation_test_case_create_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_create_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_create_response.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_list_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_response.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_retrieve_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/do_gradientai/types/agents/evaluation_test_case_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_update_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_update_params.py
diff --git a/src/gradientai/types/agents/evaluation_test_case_update_response.py b/src/do_gradientai/types/agents/evaluation_test_case_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/evaluation_test_case_update_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_update_response.py
diff --git a/src/gradientai/types/agents/function_create_params.py b/src/do_gradientai/types/agents/function_create_params.py
similarity index 100%
rename from src/gradientai/types/agents/function_create_params.py
rename to src/do_gradientai/types/agents/function_create_params.py
diff --git a/src/gradientai/types/agents/function_create_response.py b/src/do_gradientai/types/agents/function_create_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_create_response.py
rename to src/do_gradientai/types/agents/function_create_response.py
diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/do_gradientai/types/agents/function_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_delete_response.py
rename to src/do_gradientai/types/agents/function_delete_response.py
diff --git a/src/gradientai/types/agents/function_update_params.py b/src/do_gradientai/types/agents/function_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/function_update_params.py
rename to src/do_gradientai/types/agents/function_update_params.py
diff --git a/src/gradientai/types/agents/function_update_response.py b/src/do_gradientai/types/agents/function_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_update_response.py
rename to src/do_gradientai/types/agents/function_update_response.py
diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/do_gradientai/types/agents/knowledge_base_detach_response.py
similarity index 100%
rename from src/gradientai/types/agents/knowledge_base_detach_response.py
rename to src/do_gradientai/types/agents/knowledge_base_detach_response.py
diff --git a/src/gradientai/types/agents/route_add_params.py b/src/do_gradientai/types/agents/route_add_params.py
similarity index 100%
rename from src/gradientai/types/agents/route_add_params.py
rename to src/do_gradientai/types/agents/route_add_params.py
diff --git a/src/gradientai/types/agents/route_add_response.py b/src/do_gradientai/types/agents/route_add_response.py
similarity index 100%
rename from src/gradientai/types/agents/route_add_response.py
rename to src/do_gradientai/types/agents/route_add_response.py
diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/do_gradientai/types/agents/route_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/route_delete_response.py
rename to src/do_gradientai/types/agents/route_delete_response.py
diff --git a/src/gradientai/types/agents/route_update_params.py b/src/do_gradientai/types/agents/route_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/route_update_params.py
rename to src/do_gradientai/types/agents/route_update_params.py
diff --git a/src/gradientai/types/agents/route_update_response.py b/src/do_gradientai/types/agents/route_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/route_update_response.py
rename to src/do_gradientai/types/agents/route_update_response.py
diff --git a/src/gradientai/types/agents/route_view_response.py b/src/do_gradientai/types/agents/route_view_response.py
similarity index 100%
rename from src/gradientai/types/agents/route_view_response.py
rename to src/do_gradientai/types/agents/route_view_response.py
diff --git a/src/gradientai/types/agents/version_list_params.py b/src/do_gradientai/types/agents/version_list_params.py
similarity index 100%
rename from src/gradientai/types/agents/version_list_params.py
rename to src/do_gradientai/types/agents/version_list_params.py
diff --git a/src/gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/version_list_response.py
rename to src/do_gradientai/types/agents/version_list_response.py
diff --git a/src/gradientai/types/agents/version_update_params.py b/src/do_gradientai/types/agents/version_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/version_update_params.py
rename to src/do_gradientai/types/agents/version_update_params.py
diff --git a/src/gradientai/types/agents/version_update_response.py b/src/do_gradientai/types/agents/version_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/version_update_response.py
rename to src/do_gradientai/types/agents/version_update_response.py
diff --git a/src/gradientai/types/api_agent.py b/src/do_gradientai/types/api_agent.py
similarity index 100%
rename from src/gradientai/types/api_agent.py
rename to src/do_gradientai/types/api_agent.py
diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/do_gradientai/types/api_agent_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_agent_api_key_info.py
rename to src/do_gradientai/types/api_agent_api_key_info.py
diff --git a/src/gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py
similarity index 100%
rename from src/gradientai/types/api_agent_model.py
rename to src/do_gradientai/types/api_agent_model.py
diff --git a/src/gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py
similarity index 100%
rename from src/gradientai/types/api_agreement.py
rename to src/do_gradientai/types/api_agreement.py
diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/do_gradientai/types/api_anthropic_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_anthropic_api_key_info.py
rename to src/do_gradientai/types/api_anthropic_api_key_info.py
diff --git a/src/gradientai/types/api_deployment_visibility.py b/src/do_gradientai/types/api_deployment_visibility.py
similarity index 100%
rename from src/gradientai/types/api_deployment_visibility.py
rename to src/do_gradientai/types/api_deployment_visibility.py
diff --git a/src/gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py
similarity index 100%
rename from src/gradientai/types/api_knowledge_base.py
rename to src/do_gradientai/types/api_knowledge_base.py
diff --git a/src/gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py
similarity index 100%
rename from src/gradientai/types/api_model.py
rename to src/do_gradientai/types/api_model.py
diff --git a/src/gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py
similarity index 100%
rename from src/gradientai/types/api_model_version.py
rename to src/do_gradientai/types/api_model_version.py
diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/do_gradientai/types/api_openai_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_openai_api_key_info.py
rename to src/do_gradientai/types/api_openai_api_key_info.py
diff --git a/src/gradientai/types/api_retrieval_method.py b/src/do_gradientai/types/api_retrieval_method.py
similarity index 100%
rename from src/gradientai/types/api_retrieval_method.py
rename to src/do_gradientai/types/api_retrieval_method.py
diff --git a/src/gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py
similarity index 100%
rename from src/gradientai/types/api_workspace.py
rename to src/do_gradientai/types/api_workspace.py
diff --git a/src/gradientai/types/chat/__init__.py b/src/do_gradientai/types/chat/__init__.py
similarity index 100%
rename from src/gradientai/types/chat/__init__.py
rename to src/do_gradientai/types/chat/__init__.py
diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/do_gradientai/types/chat/completion_create_params.py
similarity index 100%
rename from src/gradientai/types/chat/completion_create_params.py
rename to src/do_gradientai/types/chat/completion_create_params.py
diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/do_gradientai/types/chat/completion_create_response.py
similarity index 100%
rename from src/gradientai/types/chat/completion_create_response.py
rename to src/do_gradientai/types/chat/completion_create_response.py
diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/do_gradientai/types/droplet_backup_policy.py
similarity index 100%
rename from src/gradientai/types/droplet_backup_policy.py
rename to src/do_gradientai/types/droplet_backup_policy.py
diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/do_gradientai/types/droplet_backup_policy_param.py
similarity index 100%
rename from src/gradientai/types/droplet_backup_policy_param.py
rename to src/do_gradientai/types/droplet_backup_policy_param.py
diff --git a/src/gradientai/types/gpu_droplet_create_params.py b/src/do_gradientai/types/gpu_droplet_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_create_params.py
rename to src/do_gradientai/types/gpu_droplet_create_params.py
diff --git a/src/gradientai/types/gpu_droplet_create_response.py b/src/do_gradientai/types/gpu_droplet_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_create_response.py
rename to src/do_gradientai/types/gpu_droplet_create_response.py
diff --git a/src/gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_delete_by_tag_params.py
rename to src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py
diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_params.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_firewalls_params.py
rename to src/do_gradientai/types/gpu_droplet_list_firewalls_params.py
diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_response.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_firewalls_response.py
rename to src/do_gradientai/types/gpu_droplet_list_firewalls_response.py
diff --git a/src/gradientai/types/gpu_droplet_list_kernels_params.py b/src/do_gradientai/types/gpu_droplet_list_kernels_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_kernels_params.py
rename to src/do_gradientai/types/gpu_droplet_list_kernels_params.py
diff --git a/src/gradientai/types/gpu_droplet_list_kernels_response.py b/src/do_gradientai/types/gpu_droplet_list_kernels_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_kernels_response.py
rename to src/do_gradientai/types/gpu_droplet_list_kernels_response.py
diff --git a/src/gradientai/types/gpu_droplet_list_neighbors_response.py b/src/do_gradientai/types/gpu_droplet_list_neighbors_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_neighbors_response.py
rename to src/do_gradientai/types/gpu_droplet_list_neighbors_response.py
diff --git a/src/gradientai/types/gpu_droplet_list_params.py b/src/do_gradientai/types/gpu_droplet_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_params.py
rename to src/do_gradientai/types/gpu_droplet_list_params.py
diff --git a/src/gradientai/types/gpu_droplet_list_response.py b/src/do_gradientai/types/gpu_droplet_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_response.py
rename to src/do_gradientai/types/gpu_droplet_list_response.py
diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_params.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_snapshots_params.py
rename to src/do_gradientai/types/gpu_droplet_list_snapshots_params.py
diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_response.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_list_snapshots_response.py
rename to src/do_gradientai/types/gpu_droplet_list_snapshots_response.py
diff --git a/src/gradientai/types/gpu_droplet_retrieve_response.py b/src/do_gradientai/types/gpu_droplet_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplet_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplet_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/__init__.py b/src/do_gradientai/types/gpu_droplets/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/__init__.py
rename to src/do_gradientai/types/gpu_droplets/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/account/__init__.py b/src/do_gradientai/types/gpu_droplets/account/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/__init__.py
rename to src/do_gradientai/types/gpu_droplets/account/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_create_params.py b/src/do_gradientai/types/gpu_droplets/account/key_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_create_params.py
rename to src/do_gradientai/types/gpu_droplets/account/key_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_create_response.py b/src/do_gradientai/types/gpu_droplets/account/key_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_create_response.py
rename to src/do_gradientai/types/gpu_droplets/account/key_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_list_params.py b/src/do_gradientai/types/gpu_droplets/account/key_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_list_params.py
rename to src/do_gradientai/types/gpu_droplets/account/key_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_list_response.py b/src/do_gradientai/types/gpu_droplets/account/key_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_list_response.py
rename to src/do_gradientai/types/gpu_droplets/account/key_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_update_params.py b/src/do_gradientai/types/gpu_droplets/account/key_update_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_update_params.py
rename to src/do_gradientai/types/gpu_droplets/account/key_update_params.py
diff --git a/src/gradientai/types/gpu_droplets/account/key_update_response.py b/src/do_gradientai/types/gpu_droplets/account/key_update_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/account/key_update_response.py
rename to src/do_gradientai/types/gpu_droplets/account/key_update_response.py
diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py
rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py
diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py
rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py
diff --git a/src/gradientai/types/gpu_droplets/action_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_initiate_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_initiate_params.py
rename to src/do_gradientai/types/gpu_droplets/action_initiate_params.py
diff --git a/src/gradientai/types/gpu_droplets/action_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_initiate_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_initiate_response.py
rename to src/do_gradientai/types/gpu_droplets/action_initiate_response.py
diff --git a/src/gradientai/types/gpu_droplets/action_list_params.py b/src/do_gradientai/types/gpu_droplets/action_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_list_params.py
rename to src/do_gradientai/types/gpu_droplets/action_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/action_list_response.py b/src/do_gradientai/types/gpu_droplets/action_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_list_response.py
rename to src/do_gradientai/types/gpu_droplets/action_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/action_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/action_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/action_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/associated_resource.py b/src/do_gradientai/types/gpu_droplets/associated_resource.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/associated_resource.py
rename to src/do_gradientai/types/gpu_droplets/associated_resource.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_create_params.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_create_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_history_params.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_history_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_members_params.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_members_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_params.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_list_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_update_params.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_update_params.py
diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/autoscale_update_response.py
rename to src/do_gradientai/types/gpu_droplets/autoscale_update_response.py
diff --git a/src/gradientai/types/gpu_droplets/backup_list_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_list_params.py
rename to src/do_gradientai/types/gpu_droplets/backup_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_list_policies_params.py
rename to src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py
diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_list_policies_response.py
rename to src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py
diff --git a/src/gradientai/types/gpu_droplets/backup_list_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_list_response.py
rename to src/do_gradientai/types/gpu_droplets/backup_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
rename to src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
diff --git a/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
rename to src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
diff --git a/src/gradientai/types/gpu_droplets/current_utilization.py b/src/do_gradientai/types/gpu_droplets/current_utilization.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/current_utilization.py
rename to src/do_gradientai/types/gpu_droplets/current_utilization.py
diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/destroyed_associated_resource.py
rename to src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py
diff --git a/src/gradientai/types/gpu_droplets/domains.py b/src/do_gradientai/types/gpu_droplets/domains.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/domains.py
rename to src/do_gradientai/types/gpu_droplets/domains.py
diff --git a/src/gradientai/types/gpu_droplets/domains_param.py b/src/do_gradientai/types/gpu_droplets/domains_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/domains_param.py
rename to src/do_gradientai/types/gpu_droplets/domains_param.py
diff --git a/src/gradientai/types/gpu_droplets/firewall.py b/src/do_gradientai/types/gpu_droplets/firewall.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall.py
rename to src/do_gradientai/types/gpu_droplets/firewall.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_create_params.py b/src/do_gradientai/types/gpu_droplets/firewall_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_create_params.py
rename to src/do_gradientai/types/gpu_droplets/firewall_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_create_response.py b/src/do_gradientai/types/gpu_droplets/firewall_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_create_response.py
rename to src/do_gradientai/types/gpu_droplets/firewall_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_list_params.py b/src/do_gradientai/types/gpu_droplets/firewall_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_list_params.py
rename to src/do_gradientai/types/gpu_droplets/firewall_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_list_response.py b/src/do_gradientai/types/gpu_droplets/firewall_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_list_response.py
rename to src/do_gradientai/types/gpu_droplets/firewall_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_param.py b/src/do_gradientai/types/gpu_droplets/firewall_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_param.py
rename to src/do_gradientai/types/gpu_droplets/firewall_param.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_update_params.py b/src/do_gradientai/types/gpu_droplets/firewall_update_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_update_params.py
rename to src/do_gradientai/types/gpu_droplets/firewall_update_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewall_update_response.py b/src/do_gradientai/types/gpu_droplets/firewall_update_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewall_update_response.py
rename to src/do_gradientai/types/gpu_droplets/firewall_update_response.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/types/gpu_droplets/firewalls/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/__init__.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py
diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip.py b/src/do_gradientai/types/gpu_droplets/floating_ip.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip_create_params.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip_create_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip_list_params.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip_list_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ips/__init__.py
rename to src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py
rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule.py b/src/do_gradientai/types/gpu_droplets/forwarding_rule.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/forwarding_rule.py
rename to src/do_gradientai/types/gpu_droplets/forwarding_rule.py
diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/forwarding_rule_param.py
rename to src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py
diff --git a/src/gradientai/types/gpu_droplets/glb_settings.py b/src/do_gradientai/types/gpu_droplets/glb_settings.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/glb_settings.py
rename to src/do_gradientai/types/gpu_droplets/glb_settings.py
diff --git a/src/gradientai/types/gpu_droplets/glb_settings_param.py b/src/do_gradientai/types/gpu_droplets/glb_settings_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/glb_settings_param.py
rename to src/do_gradientai/types/gpu_droplets/glb_settings_param.py
diff --git a/src/gradientai/types/gpu_droplets/health_check.py b/src/do_gradientai/types/gpu_droplets/health_check.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/health_check.py
rename to src/do_gradientai/types/gpu_droplets/health_check.py
diff --git a/src/gradientai/types/gpu_droplets/health_check_param.py b/src/do_gradientai/types/gpu_droplets/health_check_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/health_check_param.py
rename to src/do_gradientai/types/gpu_droplets/health_check_param.py
diff --git a/src/gradientai/types/gpu_droplets/image_create_params.py b/src/do_gradientai/types/gpu_droplets/image_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_create_params.py
rename to src/do_gradientai/types/gpu_droplets/image_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/image_create_response.py b/src/do_gradientai/types/gpu_droplets/image_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_create_response.py
rename to src/do_gradientai/types/gpu_droplets/image_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/image_list_params.py b/src/do_gradientai/types/gpu_droplets/image_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_list_params.py
rename to src/do_gradientai/types/gpu_droplets/image_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/image_list_response.py b/src/do_gradientai/types/gpu_droplets/image_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_list_response.py
rename to src/do_gradientai/types/gpu_droplets/image_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/image_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/image_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/image_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/image_update_params.py b/src/do_gradientai/types/gpu_droplets/image_update_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_update_params.py
rename to src/do_gradientai/types/gpu_droplets/image_update_params.py
diff --git a/src/gradientai/types/gpu_droplets/image_update_response.py b/src/do_gradientai/types/gpu_droplets/image_update_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/image_update_response.py
rename to src/do_gradientai/types/gpu_droplets/image_update_response.py
diff --git a/src/gradientai/types/gpu_droplets/images/__init__.py b/src/do_gradientai/types/gpu_droplets/images/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/images/__init__.py
rename to src/do_gradientai/types/gpu_droplets/images/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/images/action_create_params.py b/src/do_gradientai/types/gpu_droplets/images/action_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/images/action_create_params.py
rename to src/do_gradientai/types/gpu_droplets/images/action_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/images/action_list_response.py b/src/do_gradientai/types/gpu_droplets/images/action_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/images/action_list_response.py
rename to src/do_gradientai/types/gpu_droplets/images/action_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/lb_firewall.py b/src/do_gradientai/types/gpu_droplets/lb_firewall.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/lb_firewall.py
rename to src/do_gradientai/types/gpu_droplets/lb_firewall.py
diff --git a/src/gradientai/types/gpu_droplets/lb_firewall_param.py b/src/do_gradientai/types/gpu_droplets/lb_firewall_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/lb_firewall_param.py
rename to src/do_gradientai/types/gpu_droplets/lb_firewall_param.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer.py b/src/do_gradientai/types/gpu_droplets/load_balancer.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_create_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_create_response.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_list_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_list_response.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_update_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancer_update_response.py
rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancers/__init__.py
rename to src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
diff --git a/src/gradientai/types/gpu_droplets/size_list_params.py b/src/do_gradientai/types/gpu_droplets/size_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/size_list_params.py
rename to src/do_gradientai/types/gpu_droplets/size_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/size_list_response.py b/src/do_gradientai/types/gpu_droplets/size_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/size_list_response.py
rename to src/do_gradientai/types/gpu_droplets/size_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/snapshot_list_params.py
rename to src/do_gradientai/types/gpu_droplets/snapshot_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/snapshot_list_response.py
rename to src/do_gradientai/types/gpu_droplets/snapshot_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/sticky_sessions.py
rename to src/do_gradientai/types/gpu_droplets/sticky_sessions.py
diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/sticky_sessions_param.py
rename to src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py
diff --git a/src/gradientai/types/gpu_droplets/volume_create_params.py b/src/do_gradientai/types/gpu_droplets/volume_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_create_params.py
rename to src/do_gradientai/types/gpu_droplets/volume_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/volume_create_response.py b/src/do_gradientai/types/gpu_droplets/volume_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_create_response.py
rename to src/do_gradientai/types/gpu_droplets/volume_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py
rename to src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py
diff --git a/src/gradientai/types/gpu_droplets/volume_list_params.py b/src/do_gradientai/types/gpu_droplets/volume_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_list_params.py
rename to src/do_gradientai/types/gpu_droplets/volume_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/volume_list_response.py b/src/do_gradientai/types/gpu_droplets/volume_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_list_response.py
rename to src/do_gradientai/types/gpu_droplets/volume_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volume_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/__init__.py b/src/do_gradientai/types/gpu_droplets/volumes/__init__.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/__init__.py
rename to src/do_gradientai/types/gpu_droplets/volumes/__init__.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_list_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_list_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py
rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py
diff --git a/src/gradientai/types/gpu_droplets/volumes/volume_action.py b/src/do_gradientai/types/gpu_droplets/volumes/volume_action.py
similarity index 100%
rename from src/gradientai/types/gpu_droplets/volumes/volume_action.py
rename to src/do_gradientai/types/gpu_droplets/volumes/volume_action.py
diff --git a/src/gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py
similarity index 100%
rename from src/gradientai/types/inference/__init__.py
rename to src/do_gradientai/types/inference/__init__.py
diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/do_gradientai/types/inference/api_key_create_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_create_params.py
rename to src/do_gradientai/types/inference/api_key_create_params.py
diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/do_gradientai/types/inference/api_key_create_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_create_response.py
rename to src/do_gradientai/types/inference/api_key_create_response.py
diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/do_gradientai/types/inference/api_key_delete_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_delete_response.py
rename to src/do_gradientai/types/inference/api_key_delete_response.py
diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/do_gradientai/types/inference/api_key_list_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_list_params.py
rename to src/do_gradientai/types/inference/api_key_list_params.py
diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_list_response.py
rename to src/do_gradientai/types/inference/api_key_list_response.py
diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/do_gradientai/types/inference/api_key_update_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_params.py
rename to src/do_gradientai/types/inference/api_key_update_params.py
diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/do_gradientai/types/inference/api_key_update_regenerate_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_regenerate_response.py
rename to src/do_gradientai/types/inference/api_key_update_regenerate_response.py
diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/do_gradientai/types/inference/api_key_update_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_response.py
rename to src/do_gradientai/types/inference/api_key_update_response.py
diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/do_gradientai/types/inference/api_model_api_key_info.py
similarity index 100%
rename from src/gradientai/types/inference/api_model_api_key_info.py
rename to src/do_gradientai/types/inference/api_model_api_key_info.py
diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/do_gradientai/types/knowledge_base_create_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_create_params.py
rename to src/do_gradientai/types/knowledge_base_create_params.py
diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/do_gradientai/types/knowledge_base_create_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_create_response.py
rename to src/do_gradientai/types/knowledge_base_create_response.py
diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/do_gradientai/types/knowledge_base_delete_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_delete_response.py
rename to src/do_gradientai/types/knowledge_base_delete_response.py
diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/do_gradientai/types/knowledge_base_list_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_list_params.py
rename to src/do_gradientai/types/knowledge_base_list_params.py
diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_list_response.py
rename to src/do_gradientai/types/knowledge_base_list_response.py
diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/do_gradientai/types/knowledge_base_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_retrieve_response.py
rename to src/do_gradientai/types/knowledge_base_retrieve_response.py
diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/do_gradientai/types/knowledge_base_update_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_update_params.py
rename to src/do_gradientai/types/knowledge_base_update_params.py
diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/do_gradientai/types/knowledge_base_update_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_update_response.py
rename to src/do_gradientai/types/knowledge_base_update_response.py
diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/__init__.py
rename to src/do_gradientai/types/knowledge_bases/__init__.py
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_indexed_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_indexing_job.py
rename to src/do_gradientai/types/knowledge_bases/api_indexing_job.py
diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_spaces_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/aws_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/do_gradientai/types/knowledge_bases/data_source_create_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_create_params.py
rename to src/do_gradientai/types/knowledge_bases/data_source_create_params.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/do_gradientai/types/knowledge_bases/data_source_create_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_create_response.py
rename to src/do_gradientai/types/knowledge_bases/data_source_create_response.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_delete_response.py
rename to
src/do_gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/do_gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py diff --git 
diff --git a/src/gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
similarity index 100%
rename from src/gradientai/types/model_list_response.py
rename to src/do_gradientai/types/model_list_response.py
diff --git a/src/gradientai/types/model_retrieve_response.py b/src/do_gradientai/types/model_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/model_retrieve_response.py
rename to src/do_gradientai/types/model_retrieve_response.py
diff --git a/src/gradientai/types/models/__init__.py b/src/do_gradientai/types/models/__init__.py
similarity index 100%
rename from src/gradientai/types/models/__init__.py
rename to src/do_gradientai/types/models/__init__.py
diff --git a/src/gradientai/types/models/providers/__init__.py b/src/do_gradientai/types/models/providers/__init__.py
similarity index 100%
rename from src/gradientai/types/models/providers/__init__.py
rename to src/do_gradientai/types/models/providers/__init__.py
diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/do_gradientai/types/models/providers/anthropic_create_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_create_params.py
rename to src/do_gradientai/types/models/providers/anthropic_create_params.py
diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py b/src/do_gradientai/types/models/providers/anthropic_create_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_create_response.py
rename to src/do_gradientai/types/models/providers/anthropic_create_response.py
diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/do_gradientai/types/models/providers/anthropic_delete_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_delete_response.py
rename to src/do_gradientai/types/models/providers/anthropic_delete_response.py
diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_list_agents_params.py
rename to src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_list_agents_response.py
rename to src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/do_gradientai/types/models/providers/anthropic_list_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_list_params.py
rename to src/do_gradientai/types/models/providers/anthropic_list_params.py
diff --git a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/do_gradientai/types/models/providers/anthropic_list_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_list_response.py
rename to src/do_gradientai/types/models/providers/anthropic_list_response.py
diff --git a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_retrieve_response.py
rename to src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/do_gradientai/types/models/providers/anthropic_update_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_update_params.py
rename to src/do_gradientai/types/models/providers/anthropic_update_params.py
diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/do_gradientai/types/models/providers/anthropic_update_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/anthropic_update_response.py
rename to src/do_gradientai/types/models/providers/anthropic_update_response.py
diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/do_gradientai/types/models/providers/openai_create_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_create_params.py
rename to src/do_gradientai/types/models/providers/openai_create_params.py
diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/do_gradientai/types/models/providers/openai_create_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_create_response.py
rename to src/do_gradientai/types/models/providers/openai_create_response.py
diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/do_gradientai/types/models/providers/openai_delete_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_delete_response.py
rename to src/do_gradientai/types/models/providers/openai_delete_response.py
diff --git a/src/gradientai/types/models/providers/openai_list_params.py b/src/do_gradientai/types/models/providers/openai_list_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_list_params.py
rename to src/do_gradientai/types/models/providers/openai_list_params.py
diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/do_gradientai/types/models/providers/openai_list_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_list_response.py
rename to src/do_gradientai/types/models/providers/openai_list_response.py
diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_retrieve_agents_params.py
rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_retrieve_agents_response.py
rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_retrieve_response.py
rename to src/do_gradientai/types/models/providers/openai_retrieve_response.py
diff --git a/src/gradientai/types/models/providers/openai_update_params.py b/src/do_gradientai/types/models/providers/openai_update_params.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_update_params.py
rename to src/do_gradientai/types/models/providers/openai_update_params.py
diff --git a/src/gradientai/types/models/providers/openai_update_response.py b/src/do_gradientai/types/models/providers/openai_update_response.py
similarity index 100%
rename from src/gradientai/types/models/providers/openai_update_response.py
rename to src/do_gradientai/types/models/providers/openai_update_response.py
diff --git a/src/gradientai/types/region_list_params.py b/src/do_gradientai/types/region_list_params.py
similarity index 100%
rename from src/gradientai/types/region_list_params.py
rename to src/do_gradientai/types/region_list_params.py
diff --git a/src/gradientai/types/region_list_response.py b/src/do_gradientai/types/region_list_response.py
similarity index 100%
rename from src/gradientai/types/region_list_response.py
rename to src/do_gradientai/types/region_list_response.py
diff --git a/src/gradientai/types/shared/__init__.py b/src/do_gradientai/types/shared/__init__.py
similarity index 100%
rename from src/gradientai/types/shared/__init__.py
rename to src/do_gradientai/types/shared/__init__.py
diff --git a/src/gradientai/types/shared/action.py b/src/do_gradientai/types/shared/action.py
similarity index 100%
rename from src/gradientai/types/shared/action.py
rename to src/do_gradientai/types/shared/action.py
diff --git a/src/gradientai/types/shared/action_link.py b/src/do_gradientai/types/shared/action_link.py
similarity index 100%
rename from src/gradientai/types/shared/action_link.py
rename to src/do_gradientai/types/shared/action_link.py
diff --git a/src/gradientai/types/shared/api_links.py b/src/do_gradientai/types/shared/api_links.py
similarity index 100%
rename from src/gradientai/types/shared/api_links.py
rename to src/do_gradientai/types/shared/api_links.py
diff --git a/src/gradientai/types/shared/api_meta.py b/src/do_gradientai/types/shared/api_meta.py
similarity index 100%
rename from src/gradientai/types/shared/api_meta.py
rename to src/do_gradientai/types/shared/api_meta.py
diff --git a/src/gradientai/types/shared/backward_links.py b/src/do_gradientai/types/shared/backward_links.py
similarity index 100%
rename from src/gradientai/types/shared/backward_links.py
rename to src/do_gradientai/types/shared/backward_links.py
diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/do_gradientai/types/shared/chat_completion_chunk.py
similarity index 100%
rename from src/gradientai/types/shared/chat_completion_chunk.py
rename to src/do_gradientai/types/shared/chat_completion_chunk.py
diff --git a/src/gradientai/types/shared/chat_completion_token_logprob.py b/src/do_gradientai/types/shared/chat_completion_token_logprob.py
similarity index 100%
rename from src/gradientai/types/shared/chat_completion_token_logprob.py
rename to src/do_gradientai/types/shared/chat_completion_token_logprob.py
diff --git a/src/gradientai/types/shared/completion_usage.py b/src/do_gradientai/types/shared/completion_usage.py
similarity index 100%
rename from src/gradientai/types/shared/completion_usage.py
rename to src/do_gradientai/types/shared/completion_usage.py
diff --git a/src/gradientai/types/shared/disk_info.py b/src/do_gradientai/types/shared/disk_info.py
similarity index 100%
rename from src/gradientai/types/shared/disk_info.py
rename to src/do_gradientai/types/shared/disk_info.py
diff --git a/src/gradientai/types/shared/droplet.py b/src/do_gradientai/types/shared/droplet.py
similarity index 100%
rename from src/gradientai/types/shared/droplet.py
rename to src/do_gradientai/types/shared/droplet.py
diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/do_gradientai/types/shared/droplet_next_backup_window.py
similarity index 100%
rename from src/gradientai/types/shared/droplet_next_backup_window.py
rename to src/do_gradientai/types/shared/droplet_next_backup_window.py
diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/do_gradientai/types/shared/firewall_rule_target.py
similarity index 100%
rename from src/gradientai/types/shared/firewall_rule_target.py
rename to src/do_gradientai/types/shared/firewall_rule_target.py
diff --git a/src/gradientai/types/shared/forward_links.py b/src/do_gradientai/types/shared/forward_links.py
similarity index 100%
rename from src/gradientai/types/shared/forward_links.py
rename to src/do_gradientai/types/shared/forward_links.py
diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/do_gradientai/types/shared/garbage_collection.py
similarity index 100%
rename from src/gradientai/types/shared/garbage_collection.py
rename to src/do_gradientai/types/shared/garbage_collection.py
diff --git a/src/gradientai/types/shared/gpu_info.py b/src/do_gradientai/types/shared/gpu_info.py
similarity index 100%
rename from src/gradientai/types/shared/gpu_info.py
rename to src/do_gradientai/types/shared/gpu_info.py
diff --git a/src/gradientai/types/shared/image.py b/src/do_gradientai/types/shared/image.py
similarity index 100%
rename from src/gradientai/types/shared/image.py
rename to src/do_gradientai/types/shared/image.py
diff --git a/src/gradientai/types/shared/kernel.py b/src/do_gradientai/types/shared/kernel.py
similarity index 100%
rename from src/gradientai/types/shared/kernel.py
rename to src/do_gradientai/types/shared/kernel.py
diff --git a/src/gradientai/types/shared/meta_properties.py b/src/do_gradientai/types/shared/meta_properties.py
similarity index 100%
rename from src/gradientai/types/shared/meta_properties.py
rename to src/do_gradientai/types/shared/meta_properties.py
diff --git a/src/gradientai/types/shared/network_v4.py b/src/do_gradientai/types/shared/network_v4.py
similarity index 100%
rename from src/gradientai/types/shared/network_v4.py
rename to src/do_gradientai/types/shared/network_v4.py
diff --git a/src/gradientai/types/shared/network_v6.py b/src/do_gradientai/types/shared/network_v6.py
similarity index 100%
rename from src/gradientai/types/shared/network_v6.py
rename to src/do_gradientai/types/shared/network_v6.py
diff --git a/src/gradientai/types/shared/page_links.py b/src/do_gradientai/types/shared/page_links.py
similarity index 100%
rename from src/gradientai/types/shared/page_links.py
rename to src/do_gradientai/types/shared/page_links.py
diff --git a/src/gradientai/types/shared/region.py b/src/do_gradientai/types/shared/region.py
similarity index 100%
rename from src/gradientai/types/shared/region.py
rename to src/do_gradientai/types/shared/region.py
diff --git a/src/gradientai/types/shared/size.py b/src/do_gradientai/types/shared/size.py
similarity index 100%
rename from src/gradientai/types/shared/size.py
rename to src/do_gradientai/types/shared/size.py
diff --git a/src/gradientai/types/shared/snapshots.py b/src/do_gradientai/types/shared/snapshots.py
similarity index 100%
rename from src/gradientai/types/shared/snapshots.py
rename to src/do_gradientai/types/shared/snapshots.py
diff --git a/src/gradientai/types/shared/subscription.py b/src/do_gradientai/types/shared/subscription.py
similarity index 100%
rename from src/gradientai/types/shared/subscription.py
rename to src/do_gradientai/types/shared/subscription.py
diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/do_gradientai/types/shared/subscription_tier_base.py
similarity index 100%
rename from src/gradientai/types/shared/subscription_tier_base.py
rename to src/do_gradientai/types/shared/subscription_tier_base.py
diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/do_gradientai/types/shared/vpc_peering.py
similarity index 100%
rename from src/gradientai/types/shared/vpc_peering.py
rename to src/do_gradientai/types/shared/vpc_peering.py
diff --git a/src/gradientai/types/shared_params/__init__.py b/src/do_gradientai/types/shared_params/__init__.py
similarity index 100%
rename from src/gradientai/types/shared_params/__init__.py
rename to src/do_gradientai/types/shared_params/__init__.py
diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/do_gradientai/types/shared_params/firewall_rule_target.py
similarity index 100%
rename from src/gradientai/types/shared_params/firewall_rule_target.py
rename to src/do_gradientai/types/shared_params/firewall_rule_target.py
diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py
index dc13cd85..d80b5c09 100644
--- a/tests/api_resources/agents/chat/test_completions.py
+++ b/tests/api_resources/agents/chat/test_completions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.chat import CompletionCreateResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py
index 6b8f8bc7..27ab4a27 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_models.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_models.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics import ModelListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
index ea39c474..2728393e 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics import (
     WorkspaceListResponse,
     WorkspaceCreateResponse,
     WorkspaceDeleteResponse,
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
index 635721b3..37d39018 100644
--- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents.evaluation_metrics.workspaces import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics.workspaces import (
     AgentListResponse,
     AgentMoveResponse,
 )
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
index c29511f5..1e5275fe 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
index 0413591e..56edd598 100644
--- a/tests/api_resources/agents/test_evaluation_datasets.py
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
index d64367ae..303d85d6 100644
--- a/tests/api_resources/agents/test_evaluation_metrics.py
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationMetricListResponse,
     EvaluationMetricListRegionsResponse,
 )
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index 2ea44e6b..9d443f16 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationRunCreateResponse,
     EvaluationRunRetrieveResponse,
     EvaluationRunListResultsResponse,
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index e9083ba3..ae986abc 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     EvaluationTestCaseListResponse,
     EvaluationTestCaseCreateResponse,
     EvaluationTestCaseUpdateResponse,
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
index 4390d1d2..624446e0 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/agents/test_functions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     FunctionCreateResponse,
     FunctionDeleteResponse,
     FunctionUpdateResponse,
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index 2ac20d89..7ac99316 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index d04e8c90..256a4757 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     RouteAddResponse,
     RouteViewResponse,
     RouteDeleteResponse,
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
index d6151470..158856ed 100644
--- a/tests/api_resources/agents/test_versions.py
+++ b/tests/api_resources/agents/test_versions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
     VersionListResponse,
     VersionUpdateResponse,
 )
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 46c8b431..95b02106 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.chat import CompletionCreateResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py
index acad3575..cf168f61 100644
--- a/tests/api_resources/gpu_droplets/account/test_keys.py
+++ b/tests/api_resources/gpu_droplets/account/test_keys.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets.account import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets.account import (
     KeyListResponse,
     KeyCreateResponse,
     KeyUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
index 67d132aa..819a5e6e 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
@@ -7,7 +7,7 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
+from do_gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
index 446a11af..b2eab40c 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
@@ -7,7 +7,7 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
+from do_gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
index a0227c61..25c9362b 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
@@ -7,7 +7,7 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
+from do_gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
index 82a12d2e..ad26db8a 100644
--- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
+++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets.floating_ips import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets.floating_ips import (
     ActionListResponse,
     ActionCreateResponse,
     ActionRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py
index 4d59c85b..35861bcb 100644
--- a/tests/api_resources/gpu_droplets/images/test_actions.py
+++ b/tests/api_resources/gpu_droplets/images/test_actions.py
@@ -7,10 +7,10 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.shared import Action
-from gradientai.types.gpu_droplets.images import ActionListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.shared import Action
+from do_gradientai.types.gpu_droplets.images import ActionListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
index 333567f4..f22213e2 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
@@ -7,7 +7,7 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
+from do_gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
index ec6f7838..d53bd0db 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
@@ -7,7 +7,7 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
+from do_gradientai import GradientAI, AsyncGradientAI
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py
index 5e443dd8..74e45b44 100644
--- a/tests/api_resources/gpu_droplets/test_actions.py
+++ b/tests/api_resources/gpu_droplets/test_actions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     ActionListResponse,
     ActionInitiateResponse,
     ActionRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py
index 42164666..cec0371d 100644
--- a/tests/api_resources/gpu_droplets/test_autoscale.py
+++ b/tests/api_resources/gpu_droplets/test_autoscale.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     AutoscaleListResponse,
     AutoscaleCreateResponse,
     AutoscaleUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py
index f8f72140..334c701f 100644
--- a/tests/api_resources/gpu_droplets/test_backups.py
+++ b/tests/api_resources/gpu_droplets/test_backups.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     BackupListResponse,
     BackupListPoliciesResponse,
     BackupRetrievePolicyResponse,
diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
index b6922feb..2aef1fce 100644
--- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
+++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     DestroyWithAssociatedResourceListResponse,
     DestroyWithAssociatedResourceCheckStatusResponse,
 )
diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py
index 537fe7d2..6d98ebe8 100644
--- a/tests/api_resources/gpu_droplets/test_firewalls.py
+++ b/tests/api_resources/gpu_droplets/test_firewalls.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     FirewallListResponse,
     FirewallCreateResponse,
     FirewallUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py
index 830e9b39..9b8b3183 100644
--- a/tests/api_resources/gpu_droplets/test_floating_ips.py
+++ b/tests/api_resources/gpu_droplets/test_floating_ips.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     FloatingIPListResponse,
     FloatingIPCreateResponse,
     FloatingIPRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py
index 7be6a786..5a2a7c0c 100644
--- a/tests/api_resources/gpu_droplets/test_images.py
+++ b/tests/api_resources/gpu_droplets/test_images.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     ImageListResponse,
     ImageCreateResponse,
     ImageUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py
index c1ce1ce2..b96c6d52 100644
--- a/tests/api_resources/gpu_droplets/test_load_balancers.py
+++ b/tests/api_resources/gpu_droplets/test_load_balancers.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     LoadBalancerListResponse,
     LoadBalancerCreateResponse,
     LoadBalancerUpdateResponse,
diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py
index eda73b1e..1ff11cd7 100644
--- a/tests/api_resources/gpu_droplets/test_sizes.py
+++ b/tests/api_resources/gpu_droplets/test_sizes.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import SizeListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import SizeListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py
index 5d7132c2..413dd993 100644
--- a/tests/api_resources/gpu_droplets/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/test_snapshots.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py
index 64bcb4c5..baf6b430 100644
--- a/tests/api_resources/gpu_droplets/test_volumes.py
+++ b/tests/api_resources/gpu_droplets/test_volumes.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets import (
     VolumeListResponse,
     VolumeCreateResponse,
     VolumeRetrieveResponse,
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py
index d5338c97..40d9b4eb 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_actions.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets.volumes import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets.volumes import (
     ActionListResponse,
     ActionRetrieveResponse,
     ActionInitiateByIDResponse,
diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
index 8b72305c..4884d372 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.gpu_droplets.volumes import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.gpu_droplets.volumes import (
     SnapshotListResponse,
     SnapshotCreateResponse,
     SnapshotRetrieveResponse,
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
index 157a2e3d..85ad49da 100644
--- a/tests/api_resources/inference/test_api_keys.py
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.inference import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.inference import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
index 55b056b8..ebb0841a 100644
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.knowledge_bases import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.knowledge_bases import (
     DataSourceListResponse,
     DataSourceCreateResponse,
     DataSourceDeleteResponse,
diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
index ed32d7f8..b0185941 100644
--- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py
+++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.knowledge_bases import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.knowledge_bases import (
     IndexingJobListResponse,
     IndexingJobCreateResponse,
     IndexingJobRetrieveResponse,
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
index c61a97ea..6b3d99a3 100644
--- a/tests/api_resources/models/providers/test_anthropic.py
+++ b/tests/api_resources/models/providers/test_anthropic.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.models.providers import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.models.providers import (
     AnthropicListResponse,
     AnthropicCreateResponse,
     AnthropicDeleteResponse,
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
index 7fde1a69..bdde97ca 100644
--- a/tests/api_resources/models/providers/test_openai.py
+++ b/tests/api_resources/models/providers/test_openai.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types.models.providers import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.models.providers import (
     OpenAIListResponse,
     OpenAICreateResponse,
     OpenAIDeleteResponse,
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 8a6a7d69..2f68a06f 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
     AgentListResponse,
     AgentCreateResponse,
     AgentDeleteResponse,
diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py
index 22f3d2d0..cbc7e63b 100644
--- a/tests/api_resources/test_gpu_droplets.py
+++ b/tests/api_resources/test_gpu_droplets.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
     GPUDropletListResponse,
     GPUDropletCreateResponse,
     GPUDropletRetrieveResponse,
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index 8a331b52..c4d179cc 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
     KnowledgeBaseListResponse,
     KnowledgeBaseCreateResponse,
     KnowledgeBaseDeleteResponse,
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index fe837973..803c5d5a 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types import ModelListResponse, ModelRetrieveResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import ModelListResponse, ModelRetrieveResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
index 4f232293..f331342e 100644
--- a/tests/api_resources/test_regions.py
+++ b/tests/api_resources/test_regions.py
@@ -7,9 +7,9 @@
 import pytest
 
-from gradientai import GradientAI, AsyncGradientAI
 from tests.utils import assert_matches_type
-from gradientai.types import RegionListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import RegionListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/conftest.py b/tests/conftest.py
index 5b24e1c2..1e102b94 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,15 +10,15 @@
 import pytest
 from pytest_asyncio import is_async_test
 
-from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
-from gradientai._utils import is_dict
+from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
+from do_gradientai._utils import is_dict
 
 if TYPE_CHECKING:
     from _pytest.fixtures import FixtureRequest  # pyright: ignore[reportPrivateImportUsage]
 
 pytest.register_assert_rewrite("tests.utils")
 
-logging.getLogger("gradientai").setLevel(logging.DEBUG)
+logging.getLogger("do_gradientai").setLevel(logging.DEBUG)
 
 
 # automatically add `pytest.mark.asyncio()` to all of our async tests
diff --git a/tests/test_client.py b/tests/test_client.py
index 61013a0a..85b7d31a 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -21,12 +21,12 @@
 from respx import MockRouter
 from pydantic import ValidationError
 
-from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
-from gradientai._types import Omit
-from gradientai._models import BaseModel, FinalRequestOptions
-from gradientai._streaming import Stream, AsyncStream
-from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
-from gradientai._base_client import (
+from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
+from do_gradientai._types import Omit
+from do_gradientai._models import BaseModel, FinalRequestOptions
+from do_gradientai._streaming import Stream, AsyncStream
+from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
+from do_gradientai._base_client import (
     DEFAULT_TIMEOUT,
     HTTPX_DEFAULT_TIMEOUT,
     BaseClient,
@@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
                     # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
                     #
                     # removing the decorator fixes the leak for reasons we don't understand.
-                    "gradientai/_legacy_response.py",
-                    "gradientai/_response.py",
+                    "do_gradientai/_legacy_response.py",
+                    "do_gradientai/_response.py",
                     # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
-                    "gradientai/_compat.py",
+                    "do_gradientai/_compat.py",
                     # Standard library leaks we don't care about.
                     "/logging/__init__.py",
                 ]
@@ -873,7 +873,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
         calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
         respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
@@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
 
         assert _get_open_connections(self.client) == 0
 
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
         respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
@@ -909,7 +909,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
 
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     @pytest.mark.parametrize("failure_mode", ["status", "exception"])
     def test_retries_taken(
@@ -948,7 +948,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_omit_retry_count_header(
         self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
@@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_overwrite_retry_count_header(
         self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
@@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
                     # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
                     #
                     # removing the decorator fixes the leak for reasons we don't understand.
-                    "gradientai/_legacy_response.py",
-                    "gradientai/_response.py",
+                    "do_gradientai/_legacy_response.py",
+                    "do_gradientai/_response.py",
                     # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
-                    "gradientai/_compat.py",
+                    "do_gradientai/_compat.py",
                     # Standard library leaks we don't care about.
                     "/logging/__init__.py",
                 ]
@@ -1880,7 +1880,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
         calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     async def test_retrying_timeout_errors_doesnt_leak(
         self, respx_mock: MockRouter, async_client: AsyncGradientAI
@@ -1900,7 +1900,7 @@ async def test_retrying_timeout_errors_doesnt_leak(
 
         assert _get_open_connections(self.client) == 0
 
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     async def test_retrying_status_errors_doesnt_leak(
         self, respx_mock: MockRouter, async_client: AsyncGradientAI
@@ -1920,7 +1920,7 @@ async def test_retrying_status_errors_doesnt_leak(
 
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     @pytest.mark.asyncio
     @pytest.mark.parametrize("failure_mode", ["status", "exception"])
@@ -1960,7 +1960,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     @pytest.mark.asyncio
     async def test_omit_retry_count_header(
@@ -1993,7 +1993,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     @pytest.mark.asyncio
     async def test_overwrite_retry_count_header(
@@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None:
         import nest_asyncio
         import threading
 
-        from gradientai._utils import asyncify
-        from gradientai._base_client import get_platform
+        from do_gradientai._utils import asyncify
+        from do_gradientai._base_client import get_platform
 
         async def test_main() -> None:
             result = await asyncify(get_platform)()
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
index 9d1579a8..5a98ce1b 100644
--- a/tests/test_deepcopy.py
+++ b/tests/test_deepcopy.py
@@ -1,4 +1,4 @@
-from gradientai._utils import deepcopy_minimal
+from do_gradientai._utils import deepcopy_minimal
 
 
 def assert_different_identities(obj1: object, obj2: object) -> None:
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index 2905d59c..341e65ae 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,8 +4,8 @@
 import pytest
 
-from gradientai._types import FileTypes
-from gradientai._utils import extract_files
+from do_gradientai._types import FileTypes
+from do_gradientai._utils import extract_files
 
 
 def test_removes_files_from_input() -> None:
diff --git a/tests/test_files.py b/tests/test_files.py
index 4a723313..ff7914bb 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,7 @@
 import pytest
 from dirty_equals import IsDict, IsList, IsBytes, IsTuple
 
-from gradientai._files import to_httpx_files, async_to_httpx_files
+from do_gradientai._files import to_httpx_files, async_to_httpx_files
 
 readme_path = Path(__file__).parent.parent.joinpath("README.md")
diff --git a/tests/test_models.py b/tests/test_models.py
index 3a857584..bfbef61a 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -7,9 +7,9 @@
 import pydantic
 from pydantic import Field
 
-from gradientai._utils import PropertyInfo
-from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
-from gradientai._models import BaseModel, construct_type
+from do_gradientai._utils import PropertyInfo
+from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
+from do_gradientai._models import BaseModel, construct_type
 
 
 class BasicModel(BaseModel):
diff --git a/tests/test_qs.py b/tests/test_qs.py
index 9080377b..c9213571 100644
--- a/tests/test_qs.py
+++ b/tests/test_qs.py
@@ -4,7 +4,7 @@
 import pytest
 
-from gradientai._qs import Querystring, stringify
+from do_gradientai._qs import Querystring, stringify
 
 
 def test_empty() -> None:
diff --git a/tests/test_required_args.py b/tests/test_required_args.py
index c4e6b9d8..434e9491 100644
--- a/tests/test_required_args.py
+++ b/tests/test_required_args.py
@@ -2,7 +2,7 @@
 import pytest
 
-from gradientai._utils import required_args
+from do_gradientai._utils import required_args
 
 
 def test_too_many_positional_params() -> None:
diff --git a/tests/test_response.py b/tests/test_response.py
index 1a8f241e..001ce776 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -6,8 +6,8 @@
 import pytest
 import pydantic
 
-from gradientai import BaseModel, GradientAI, AsyncGradientAI
-from gradientai._response import (
+from do_gradientai import BaseModel, GradientAI, AsyncGradientAI
+from do_gradientai._response import (
     APIResponse,
     BaseAPIResponse,
     AsyncAPIResponse,
@@ -15,8 +15,8 @@
     AsyncBinaryAPIResponse,
     extract_response_type,
 )
-from gradientai._streaming import Stream
-from gradientai._base_client import FinalRequestOptions
+from do_gradientai._streaming import Stream
+from do_gradientai._base_client import FinalRequestOptions
 
 
 class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
@@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None:
 def test_extract_response_type_direct_class_missing_type_arg() -> None:
     with pytest.raises(
         RuntimeError,
-        match="Expected type to have a type argument at index 0 but it did not",
+        match="Expected type to have a type argument at index 0 but it did not",
     ):
         extract_response_type(AsyncAPIResponse)
@@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None:
     with pytest.raises(
         TypeError,
-        match="Pydantic models must subclass our base model type, e.g.
`from do_gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index cdb41a77..c1ce8e85 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from gradientai import GradientAI, AsyncGradientAI -from gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 825fe048..30c06d6a 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from gradientai._types import NOT_GIVEN, Base64FileInput -from gradientai._utils import ( +from do_gradientai._types import NOT_GIVEN, Base64FileInput +from do_gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from gradientai._compat import PYDANTIC_V2 -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2 +from do_gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 3856b2c9..9ce2e0d3 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from gradientai._utils import LazyProxy +from do_gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 66ad064f..c9129fdc 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from gradientai._utils import extract_type_var_from_base +from do_gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index b539ed2c..9def7c60 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from gradientai._types import Omit, NoneType -from gradientai._utils import ( +from do_gradientai._types import Omit, NoneType +from do_gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from do_gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From 5947e458845c195686672d5ea5d8ab0511846c79 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 16:33:10 +0000 Subject: [PATCH 132/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b386befd..e613b816 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.19" + ".": "0.1.0-beta.1" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index a28ca97c..41d41f79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "do_gradientai" -version = "0.1.0-alpha.19" +version = "0.1.0-beta.1" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index e13138f7..9ce69989 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.19" # x-release-please-version +__version__ = "0.1.0-beta.1" # x-release-please-version From f5696319548c38386c13e4db6343fc1ba169c880 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 02:20:06 +0000 Subject: [PATCH 133/200] fix(parsing): ignore empty metadata --- src/do_gradientai/_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/do_gradientai/_models.py b/src/do_gradientai/_models.py index 528d5680..ffcbf67b 100644 --- a/src/do_gradientai/_models.py +++ b/src/do_gradientai/_models.py @@ -439,7 +439,7 @@ def construct_type(*, value: object, type_: object, metadata: Optional[List[Any] type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if metadata is not None: + if metadata is not None and len(metadata) > 0: meta: tuple[Any, ...] = tuple(metadata) elif is_annotated_type(type_): meta = get_args(type_)[1:] From b67c8bd967fbd854537665255fca9a6976cca4d7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 03:23:05 +0000 Subject: [PATCH 134/200] chore(types): rebuild Pydantic models after all types are defined --- src/do_gradientai/types/__init__.py | 65 +++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index 20747fb3..0e08e399 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -2,6 +2,18 @@ from __future__ import annotations +from . import ( + agents, + models, + api_agent, + api_workspace, + agent_create_response, + agent_delete_response, + agent_update_response, + agent_retrieve_response, + agent_update_status_response, +) +from .. import _compat from .shared import ( Size as Size, Image as Image, @@ -80,3 +92,56 @@ from .gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse from .gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse as GPUDropletListNeighborsResponse from .gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse as GPUDropletListSnapshotsResponse + +# Rebuild cyclical models only after all modules are imported. +# This ensures that, when building the deferred (due to cyclical references) model schema, +# Pydantic can resolve the necessary references. +# See: https://github.com/pydantic/pydantic/issues/11250 for more context. 
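
A minimal stand-alone sketch of the cycle this comment describes (reviewer annotation, not part of the patch; the class names are hypothetical stand-ins for the generated types):

```python
from typing import List, Optional

import pydantic


class APIAgentSketch(pydantic.BaseModel):
    uuid: str
    # Forward reference: APIWorkspaceSketch does not exist yet when this
    # class body is evaluated, so the model's schema stays deferred.
    workspace: Optional["APIWorkspaceSketch"] = None


class APIWorkspaceSketch(pydantic.BaseModel):
    # ...and the workspace refers back to agents, closing the cycle.
    agents: List[APIAgentSketch] = []


# Resolve the deferred reference now that both classes exist. This mirrors the
# model_rebuild(_parent_namespace_depth=0) calls the patch adds: the generated
# types live in separate modules, so Pydantic cannot always resolve the names
# lazily on first use the way it can for a same-module cycle like this one.
APIAgentSketch.model_rebuild()

print(APIAgentSketch(uuid="123", workspace={"agents": []}))
```
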
+if _compat.PYDANTIC_V2: + api_agent.APIAgent.model_rebuild(_parent_namespace_depth=0) + api_workspace.APIWorkspace.model_rebuild(_parent_namespace_depth=0) + agent_create_response.AgentCreateResponse.model_rebuild(_parent_namespace_depth=0) + agent_retrieve_response.AgentRetrieveResponse.model_rebuild(_parent_namespace_depth=0) + agent_update_response.AgentUpdateResponse.model_rebuild(_parent_namespace_depth=0) + agent_delete_response.AgentDeleteResponse.model_rebuild(_parent_namespace_depth=0) + agent_update_status_response.AgentUpdateStatusResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.model_rebuild( + _parent_namespace_depth=0 + ) + agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.model_rebuild(_parent_namespace_depth=0) + agents.function_create_response.FunctionCreateResponse.model_rebuild(_parent_namespace_depth=0) + agents.function_update_response.FunctionUpdateResponse.model_rebuild(_parent_namespace_depth=0) + agents.function_delete_response.FunctionDeleteResponse.model_rebuild(_parent_namespace_depth=0) + agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.model_rebuild(_parent_namespace_depth=0) + agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.model_rebuild(_parent_namespace_depth=0) + agents.route_view_response.RouteViewResponse.model_rebuild(_parent_namespace_depth=0) + models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(_parent_namespace_depth=0) + models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild( + _parent_namespace_depth=0 + ) +else: + api_agent.APIAgent.update_forward_refs() # type: ignore + api_workspace.APIWorkspace.update_forward_refs() # type: ignore + agent_create_response.AgentCreateResponse.update_forward_refs() # type: ignore + agent_retrieve_response.AgentRetrieveResponse.update_forward_refs() # type: ignore + agent_update_response.AgentUpdateResponse.update_forward_refs() # type: ignore + agent_delete_response.AgentDeleteResponse.update_forward_refs() # type: ignore + agent_update_status_response.AgentUpdateStatusResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore + agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore + 
agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore + agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore + agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore + agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore + agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore + models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore + models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore From 05be1f09ecf1272644c5e9e3b78e33375a37b789 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 20:01:10 +0000 Subject: [PATCH 135/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e613b816..aff3ead3 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-beta.1" + ".": "0.1.0-beta.2" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 41d41f79..3f12bf03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "do_gradientai" -version = "0.1.0-beta.1" +version = "0.1.0-beta.2" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index 9ce69989..a31f70ad 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "do_gradientai" -__version__ = "0.1.0-beta.1" # x-release-please-version +__version__ = "0.1.0-beta.2" # x-release-please-version From 9928a3fb0641d7254c583b4eefc04f8d31274d60 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 02:23:18 +0000 Subject: [PATCH 136/200] fix(parsing): parse extra field types --- src/do_gradientai/_models.py | 25 +++++++++++++++++++++++-- tests/test_models.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/src/do_gradientai/_models.py b/src/do_gradientai/_models.py index ffcbf67b..b8387ce9 100644 --- a/src/do_gradientai/_models.py +++ b/src/do_gradientai/_models.py @@ -208,14 +208,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + if PYDANTIC_V2: - _extra[key] = value + _extra[key] = parsed else: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed object.__setattr__(m, "__dict__", fields_values) @@ -370,6 +374,23 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if not PYDANTIC_V2: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None + + def is_basemodel(type_: type) -> bool: """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" if is_union(type_): diff --git a/tests/test_models.py b/tests/test_models.py index bfbef61a..9a3891e3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import json -from typing import Any, Dict, List, Union, Optional, cast +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone from typing_extensions import Literal, Annotated, TypeAliasType @@ -934,3 +934,30 @@ class Type2(BaseModel): ) assert isinstance(model, Type1) assert isinstance(model.value, InnerType2) + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +def test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... 
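
Reviewer annotation, not part of the patch (the new test continues right after this note): a hedged sketch, Pydantic v2 only, of the core-schema layout the new `_get_extra_fields_type` helper inspects. A model that types its extra fields via `__pydantic_extra__` records that value type under `model-fields -> extras_schema`, which is where the helper finds the class to pass to `construct_type`:

```python
from typing import Dict

import pydantic


class Item(pydantic.BaseModel):
    prop: int


class Model(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(extra="allow")

    # Typed extras: unknown keys must validate as `Item`.
    __pydantic_extra__: Dict[str, Item] = pydantic.Field(init=False)

    other: str


# The same lookup the helper performs, spelled out on this toy model.
schema = Model.__pydantic_core_schema__
if schema["type"] == "model":
    fields = schema["schema"]
    if fields["type"] == "model-fields":
        extras = fields.get("extras_schema")
        print(extras and extras.get("cls"))  # -> <class '__main__.Item'>
```
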
+ + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo" From 582deb0da893a9baf02625f65bd2346e68f94dd9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 25 Jul 2025 05:12:07 +0000 Subject: [PATCH 137/200] chore(project): add settings file for vscode --- .gitignore | 1 - .vscode/settings.json | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 .vscode/settings.json diff --git a/.gitignore b/.gitignore index 87797408..95ceb189 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .prism.log -.vscode _dev __pycache__ diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..5b010307 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "python.analysis.importFormat": "relative", +} From ac46515308fa82c6c4e284272b5383adfbbce1ca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 25 Jul 2025 19:31:32 +0000 Subject: [PATCH 138/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index aff3ead3..bf7fe4fa 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-beta.2" + ".": "0.1.0-beta.3" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3f12bf03..e6983ab4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "do_gradientai" -version = "0.1.0-beta.2" +version = "0.1.0-beta.3" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index a31f70ad..2789c067 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "do_gradientai" -__version__ = "0.1.0-beta.2" # x-release-please-version +__version__ = "0.1.0-beta.3" # x-release-please-version From e70c1a534c8f40029d3b50cefc6d35e8f31ac041 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:31:09 +0000 Subject: [PATCH 139/200] chore: update SDK settings --- .stats.yml | 8 +- README.md | 6 +- api.md | 87 ++- pyproject.toml | 2 +- requirements-dev.lock | 16 +- requirements.lock | 16 +- src/do_gradientai/_client.py | 39 +- src/do_gradientai/resources/__init__.py | 14 + .../agents/evaluation_metrics/__init__.py | 28 + .../evaluation_metrics/anthropic/__init__.py | 33 + .../evaluation_metrics/anthropic/anthropic.py | 102 +++ .../evaluation_metrics/anthropic/keys.py | 711 ++++++++++++++++++ .../evaluation_metrics/evaluation_metrics.py | 64 ++ .../evaluation_metrics/openai/__init__.py | 33 + .../agents/evaluation_metrics/openai/keys.py | 707 +++++++++++++++++ .../evaluation_metrics/openai/openai.py | 102 +++ .../resources/databases/__init__.py | 33 + .../resources/databases/databases.py | 102 +++ .../databases/schema_registry/__init__.py | 33 + .../databases/schema_registry/config.py | 506 +++++++++++++ .../schema_registry/schema_registry.py | 102 +++ src/do_gradientai/resources/models/models.py | 182 +++-- src/do_gradientai/types/__init__.py | 10 +- .../evaluation_metrics/anthropic/__init__.py | 14 + .../anthropic/key_create_params.py | 15 + .../anthropic/key_create_response.py | 13 + .../anthropic/key_delete_response.py | 13 + .../anthropic/key_list_agents_params.py | 15 + .../anthropic/key_list_agents_response.py | 24 + .../anthropic/key_list_params.py | 15 + .../anthropic/key_list_response.py | 21 + .../anthropic/key_retrieve_response.py | 13 + .../anthropic/key_update_params.py | 20 + .../anthropic/key_update_response.py | 13 + .../evaluation_metrics/openai/__init__.py | 14 + .../openai/key_create_params.py | 15 + .../openai/key_create_response.py | 13 + .../openai/key_delete_response.py | 13 + .../openai/key_list_agents_params.py | 15 + .../openai/key_list_agents_response.py | 24 + .../openai/key_list_params.py | 15 + .../openai/key_list_response.py | 21 + .../openai/key_retrieve_response.py | 13 + .../openai/key_update_params.py | 20 + .../openai/key_update_response.py | 13 + src/do_gradientai/types/databases/__init__.py | 3 + .../databases/schema_registry/__init__.py | 10 + .../config_retrieve_response.py | 14 + .../config_retrieve_subject_response.py | 17 + .../schema_registry/config_update_params.py | 14 + .../schema_registry/config_update_response.py | 14 + .../config_update_subject_params.py | 16 + .../config_update_subject_response.py | 17 + src/do_gradientai/types/model_list_params.py | 42 ++ .../types/model_list_response.py | 31 +- .../types/model_retrieve_response.py | 21 - .../evaluation_metrics/anthropic/__init__.py | 1 + .../evaluation_metrics/anthropic/test_keys.py | 557 ++++++++++++++ .../evaluation_metrics/openai/__init__.py | 1 + .../evaluation_metrics/openai/test_keys.py | 557 ++++++++++++++ tests/api_resources/databases/__init__.py | 1 + .../databases/schema_registry/__init__.py | 1 + .../databases/schema_registry/test_config.py | 423 +++++++++++ tests/api_resources/test_models.py | 100 +-- 64 files changed, 4895 insertions(+), 233 deletions(-) create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py create 
mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py create mode 100644 src/do_gradientai/resources/databases/__init__.py create mode 100644 src/do_gradientai/resources/databases/databases.py create mode 100644 src/do_gradientai/resources/databases/schema_registry/__init__.py create mode 100644 src/do_gradientai/resources/databases/schema_registry/config.py create mode 100644 src/do_gradientai/resources/databases/schema_registry/schema_registry.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py create mode 100644 src/do_gradientai/types/databases/__init__.py create mode 100644 src/do_gradientai/types/databases/schema_registry/__init__.py create mode 100644 src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py create mode 100644 src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_params.py create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_response.py create mode 100644 
src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py create mode 100644 src/do_gradientai/types/model_list_params.py delete mode 100644 src/do_gradientai/types/model_retrieve_response.py create mode 100644 tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py create mode 100644 tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py create mode 100644 tests/api_resources/agents/evaluation_metrics/openai/__init__.py create mode 100644 tests/api_resources/agents/evaluation_metrics/openai/test_keys.py create mode 100644 tests/api_resources/databases/__init__.py create mode 100644 tests/api_resources/databases/schema_registry/__init__.py create mode 100644 tests/api_resources/databases/schema_registry/test_config.py diff --git a/.stats.yml b/.stats.yml index 718d3432..3b125b04 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 168 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml -openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 5cf9c7359c13307780aa25d0203b0b35 +configured_endpoints: 170 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml +openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859 +config_hash: 2eaf277c8b9bf0acf76b2b16f99ff443 diff --git a/README.md b/README.md index df2a09de..7d41d9c3 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Gradient AI Python API library -[![PyPI version](https://img.shields.io/pypi/v/do_gradientai.svg?label=pypi%20(stable))](https://pypi.org/project/do_gradientai/) +[![PyPI version](https://img.shields.io/pypi/v/gradient.svg?label=pypi%20(stable))](https://pypi.org/project/gradient/) The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. 
The library includes type definitions for all request params and response fields, @@ -17,7 +17,7 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ```sh # install from PyPI -pip install --pre do_gradientai +pip install --pre gradient ``` ## Usage @@ -89,7 +89,7 @@ You can enable this by installing `aiohttp`: ```sh # install from PyPI -pip install --pre do_gradientai[aiohttp] +pip install --pre gradient[aiohttp] ``` Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: diff --git a/api.md b/api.md index 5d6e5491..dc52233d 100644 --- a/api.md +++ b/api.md @@ -168,6 +168,58 @@ Methods: - client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse +### Anthropic + +#### Keys + +Types: + +```python +from do_gradientai.types.agents.evaluation_metrics.anthropic import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.agents.evaluation_metrics.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.agents.evaluation_metrics.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.agents.evaluation_metrics.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.agents.evaluation_metrics.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.agents.evaluation_metrics.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.agents.evaluation_metrics.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + +### OpenAI + +#### Keys + +Types: + +```python +from do_gradientai.types.agents.evaluation_metrics.openai import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.agents.evaluation_metrics.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.agents.evaluation_metrics.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.agents.evaluation_metrics.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.agents.evaluation_metrics.openai.keys.list(\*\*params) -> KeyListResponse +- client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + ## EvaluationRuns Types: @@ -800,19 +852,12 @@ Methods: Types: ```python -from do_gradientai.types import ( - APIAgreement, - APIModel, - APIModelVersion, - ModelRetrieveResponse, - ModelListResponse, -) +from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse ## Providers @@ -875,3 +920,27 @@ from do_gradientai.types import RegionListResponse Methods: - client.regions.list(\*\*params) -> RegionListResponse + +# Databases + +## SchemaRegistry + +### Config + +Types: + +```python +from do_gradientai.types.databases.schema_registry import ( + ConfigRetrieveResponse, + ConfigUpdateResponse, + ConfigRetrieveSubjectResponse, + ConfigUpdateSubjectResponse, +) +``` + +Methods: + +- client.databases.schema_registry.config.retrieve(database_cluster_uuid) -> ConfigRetrieveResponse +- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse +- 
client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse +- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse diff --git a/pyproject.toml b/pyproject.toml index e6983ab4..fb34755a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "do_gradientai" +name = "gradient" version = "0.1.0-beta.3" description = "The official Python library for the GradientAI API" dynamic = ["readme"] diff --git a/requirements-dev.lock b/requirements-dev.lock index f839fd0e..7a0f60ab 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -13,14 +13,14 @@ aiohappyeyeballs==2.6.1 # via aiohttp aiohttp==3.12.8 - # via do-gradientai + # via gradient # via httpx-aiohttp aiosignal==1.3.2 # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-gradientai + # via gradient # via httpx argcomplete==3.1.2 # via nox @@ -37,7 +37,7 @@ dirty-equals==0.6.0 distlib==0.3.7 # via virtualenv distro==1.8.0 - # via do-gradientai + # via gradient exceptiongroup==1.2.2 # via anyio # via pytest @@ -53,11 +53,11 @@ h11==0.16.0 httpcore==1.0.9 # via httpx httpx==0.28.1 - # via do-gradientai + # via gradient # via httpx-aiohttp # via respx httpx-aiohttp==0.1.8 - # via do-gradientai + # via gradient idna==3.4 # via anyio # via httpx @@ -90,7 +90,7 @@ propcache==0.3.1 # via aiohttp # via yarl pydantic==2.10.3 - # via do-gradientai + # via gradient pydantic-core==2.27.1 # via pydantic pygments==2.18.0 @@ -114,14 +114,14 @@ six==1.16.0 # via python-dateutil sniffio==1.3.0 # via anyio - # via do-gradientai + # via gradient time-machine==2.9.0 tomli==2.0.2 # via mypy # via pytest typing-extensions==4.12.2 # via anyio - # via do-gradientai + # via gradient # via multidict # via mypy # via pydantic diff --git a/requirements.lock b/requirements.lock index 33a3cfb2..f9072669 100644 --- a/requirements.lock +++ b/requirements.lock @@ -13,14 +13,14 @@ aiohappyeyeballs==2.6.1 # via aiohttp aiohttp==3.12.8 - # via do-gradientai + # via gradient # via httpx-aiohttp aiosignal==1.3.2 # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 - # via do-gradientai + # via gradient # via httpx async-timeout==5.0.1 # via aiohttp @@ -30,7 +30,7 @@ certifi==2023.7.22 # via httpcore # via httpx distro==1.8.0 - # via do-gradientai + # via gradient exceptiongroup==1.2.2 # via anyio frozenlist==1.6.2 @@ -41,10 +41,10 @@ h11==0.16.0 httpcore==1.0.9 # via httpx httpx==0.28.1 - # via do-gradientai + # via gradient # via httpx-aiohttp httpx-aiohttp==0.1.8 - # via do-gradientai + # via gradient idna==3.4 # via anyio # via httpx @@ -56,15 +56,15 @@ propcache==0.3.1 # via aiohttp # via yarl pydantic==2.10.3 - # via do-gradientai + # via gradient pydantic-core==2.27.1 # via pydantic sniffio==1.3.0 # via anyio - # via do-gradientai + # via gradient typing-extensions==4.12.2 # via anyio - # via do-gradientai + # via gradient # via multidict # via pydantic # via pydantic-core diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py index e715ce61..24f65772 100644 --- a/src/do_gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -32,11 +32,12 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, gpu_droplets, knowledge_bases + from .resources import chat, agents, models, regions, databases, inference, gpu_droplets, knowledge_bases from .resources.regions import RegionsResource, 
AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.models.models import ModelsResource, AsyncModelsResource + from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.gpu_droplets.gpu_droplets import GPUDropletsResource, AsyncGPUDropletsResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -168,6 +169,12 @@ def regions(self) -> RegionsResource: return RegionsResource(self) + @cached_property + def databases(self) -> DatabasesResource: + from .resources.databases import DatabasesResource + + return DatabasesResource(self) + @cached_property def with_raw_response(self) -> GradientAIWithRawResponse: return GradientAIWithRawResponse(self) @@ -417,6 +424,12 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) + @cached_property + def databases(self) -> AsyncDatabasesResource: + from .resources.databases import AsyncDatabasesResource + + return AsyncDatabasesResource(self) + @cached_property def with_raw_response(self) -> AsyncGradientAIWithRawResponse: return AsyncGradientAIWithRawResponse(self) @@ -599,6 +612,12 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) + @cached_property + def databases(self) -> databases.DatabasesResourceWithRawResponse: + from .resources.databases import DatabasesResourceWithRawResponse + + return DatabasesResourceWithRawResponse(self._client.databases) + class AsyncGradientAIWithRawResponse: _client: AsyncGradientAI @@ -648,6 +667,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) + @cached_property + def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse: + from .resources.databases import AsyncDatabasesResourceWithRawResponse + + return AsyncDatabasesResourceWithRawResponse(self._client.databases) + class GradientAIWithStreamedResponse: _client: GradientAI @@ -697,6 +722,12 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) + @cached_property + def databases(self) -> databases.DatabasesResourceWithStreamingResponse: + from .resources.databases import DatabasesResourceWithStreamingResponse + + return DatabasesResourceWithStreamingResponse(self._client.databases) + class AsyncGradientAIWithStreamedResponse: _client: AsyncGradientAI @@ -746,6 +777,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + @cached_property + def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse: + from .resources.databases import AsyncDatabasesResourceWithStreamingResponse + + return AsyncDatabasesResourceWithStreamingResponse(self._client.databases) + Client = GradientAI diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py index 45abd6a3..d5198560 100644 --- a/src/do_gradientai/resources/__init__.py +++ b/src/do_gradientai/resources/__init__.py @@ -32,6 +32,14 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) +from .databases import ( + DatabasesResource, + AsyncDatabasesResource, + 
DatabasesResourceWithRawResponse, + AsyncDatabasesResourceWithRawResponse, + DatabasesResourceWithStreamingResponse, + AsyncDatabasesResourceWithStreamingResponse, +) from .inference import ( InferenceResource, AsyncInferenceResource, @@ -100,4 +108,10 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", + "DatabasesResource", + "AsyncDatabasesResource", + "DatabasesResourceWithRawResponse", + "AsyncDatabasesResourceWithRawResponse", + "DatabasesResourceWithStreamingResponse", + "AsyncDatabasesResourceWithStreamingResponse", ] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py index ce687621..92449820 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py +++ b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py @@ -8,6 +8,22 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) from .workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -38,6 +54,18 @@ "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py new file mode 100644 index 00000000..057a3a2f --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AnthropicResource", + "AsyncAnthropicResource", + "AnthropicResourceWithRawResponse", + "AsyncAnthropicResourceWithRawResponse", + "AnthropicResourceWithStreamingResponse", + "AsyncAnthropicResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py new file mode 100644 index 00000000..1532f98e --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AnthropicResource", "AsyncAnthropicResource"] + + +class AnthropicResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AnthropicResourceWithStreamingResponse(self) + + +class AsyncAnthropicResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAnthropicResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAnthropicResourceWithStreamingResponse(self) + + +class AnthropicResourceWithRawResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithRawResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._anthropic.keys) + + +class AnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._anthropic.keys) + + +class AsyncAnthropicResourceWithStreamingResponse: + def __init__(self, anthropic: AsyncAnthropicResource) -> None: + self._anthropic = anthropic + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py new file mode 100644 index 00000000..959e786b --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py @@ -0,0 +1,711 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics.anthropic import ( + key_list_params, + key_create_params, + key_update_params, + key_list_agents_params, +) +from .....types.agents.evaluation_metrics.anthropic.key_list_response import KeyListResponse +from .....types.agents.evaluation_metrics.anthropic.key_create_response import KeyCreateResponse +from .....types.agents.evaluation_metrics.anthropic.key_delete_response import KeyDeleteResponse +from .....types.agents.evaluation_metrics.anthropic.key_update_response import KeyUpdateResponse +from .....types.agents.evaluation_metrics.anthropic.key_retrieve_response import KeyRetrieveResponse +from .....types.agents.evaluation_metrics.anthropic.key_list_agents_response import KeyListAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + api_key: Anthropic API key + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
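+
+        Example (a minimal sketch, assuming a configured client; the UUID below is a
+        placeholder, not a real key ID):
+
+            client.agents.evaluation_metrics.anthropic.keys.delete(
+                "00000000-0000-0000-0000-000000000000",
+            )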
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an Anthropic API key, send a POST request to + `/v2/gen-ai/anthropic/keys`. + + Args: + api_key: Anthropic API key + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an Anthropic API key, send a GET request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an Anthropic API key, send a PUT request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
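+
+        Example (a minimal sketch; note that `path_api_key_uuid` addresses the key in
+        the URL, while the optional `body_api_key_uuid` is sent in the request body):
+
+            key = await client.agents.evaluation_metrics.anthropic.keys.update(
+                path_api_key_uuid="00000000-0000-0000-0000-000000000000",  # placeholder
+                name="renamed-key",
+            )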
+ + Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all Anthropic API keys, send a GET request to + `/v2/gen-ai/anthropic/keys`. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an Anthropic API key, send a DELETE request to + `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by Anthropic Key. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = to_raw_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_raw_response_wrapper( + keys.list_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = 
to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = to_streamed_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_streamed_response_wrapper( + keys.list_agents, + ) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py index edf708df..533a68bd 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -22,8 +22,24 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .openai.openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) from ...._base_client import make_request_options from ....types.agents import evaluation_metric_list_regions_params +from .anthropic.anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) from .workspaces.workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -47,6 +63,14 @@ def workspaces(self) -> WorkspacesResource: def models(self) -> ModelsResource: return ModelsResource(self._client) + @cached_property + def anthropic(self) -> AnthropicResource: + return AnthropicResource(self._client) + + @cached_property + def openai(self) -> OpenAIResource: + return OpenAIResource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -148,6 +172,14 @@ def workspaces(self) -> AsyncWorkspacesResource: def models(self) -> AsyncModelsResource: return AsyncModelsResource(self._client) + @cached_property + def anthropic(self) -> AsyncAnthropicResource: + return AsyncAnthropicResource(self._client) + + @cached_property + def openai(self) -> AsyncOpenAIResource: + return AsyncOpenAIResource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -259,6 +291,14 @@ def workspaces(self) -> WorkspacesResourceWithRawResponse: def models(self) -> ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._evaluation_metrics.models) + @cached_property + def anthropic(self) -> AnthropicResourceWithRawResponse: + return AnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithRawResponse: + return OpenAIResourceWithRawResponse(self._evaluation_metrics.openai) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -279,6 +319,14 @@ def workspaces(self) -> 
AsyncWorkspacesResourceWithRawResponse: def models(self) -> AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models) + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: + return AsyncAnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithRawResponse: + return AsyncOpenAIResourceWithRawResponse(self._evaluation_metrics.openai) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -299,6 +347,14 @@ def workspaces(self) -> WorkspacesResourceWithStreamingResponse: def models(self) -> ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models) + @cached_property + def anthropic(self) -> AnthropicResourceWithStreamingResponse: + return AnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithStreamingResponse: + return OpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -318,3 +374,11 @@ def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse: @cached_property def models(self) -> AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models) + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: + return AsyncAnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: + return AsyncOpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py new file mode 100644 index 00000000..66d8ca7a --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py new file mode 100644 index 00000000..33a71ae1 --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py @@ -0,0 +1,707 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics.openai import ( + key_list_params, + key_create_params, + key_update_params, + key_list_agents_params, +) +from .....types.agents.evaluation_metrics.openai.key_list_response import KeyListResponse +from .....types.agents.evaluation_metrics.openai.key_create_response import KeyCreateResponse +from .....types.agents.evaluation_metrics.openai.key_delete_response import KeyDeleteResponse +from .....types.agents.evaluation_metrics.openai.key_update_response import KeyUpdateResponse +from .....types.agents.evaluation_metrics.openai.key_retrieve_response import KeyRetrieveResponse +from .....types.agents.evaluation_metrics.openai.key_list_agents_response import KeyListAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. 
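+
+        Example (a minimal sketch, assuming a configured client; the key value is a
+        placeholder, not a real credential):
+
+            created = client.agents.evaluation_metrics.openai.keys.create(
+                api_key="sk-...",
+                name="my-openai-key",
+            )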
+ + Args: + api_key: OpenAI API key + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + api_key: OpenAI API key + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def list_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: Page number. + + per_page: Items per page. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_agents_params.KeyListAgentsParams, + ), + ), + cast_to=KeyListAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = to_raw_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_raw_response_wrapper( + keys.list_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + 
keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = to_streamed_response_wrapper( + keys.list_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.list_agents = async_to_streamed_response_wrapper( + keys.list_agents, + ) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py new file mode 100644 index 00000000..d66dbbde --- /dev/null +++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] + + +class OpenAIResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> OpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return OpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return OpenAIResourceWithStreamingResponse(self) + + +class AsyncOpenAIResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncOpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
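+
+        Illustrative sketch (an assumption based on the wrapper pattern used across
+        this SDK, not generated output):
+
+            keys = client.agents.evaluation_metrics.openai.keys
+            async with keys.with_streaming_response.list() as response:
+                ...  # inspect headers or consume the body without eager buffering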
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncOpenAIResourceWithStreamingResponse(self) + + +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._openai.keys) + + +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/do_gradientai/resources/databases/__init__.py b/src/do_gradientai/resources/databases/__init__.py new file mode 100644 index 00000000..40c62ed8 --- /dev/null +++ b/src/do_gradientai/resources/databases/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .databases import ( + DatabasesResource, + AsyncDatabasesResource, + DatabasesResourceWithRawResponse, + AsyncDatabasesResourceWithRawResponse, + DatabasesResourceWithStreamingResponse, + AsyncDatabasesResourceWithStreamingResponse, +) +from .schema_registry import ( + SchemaRegistryResource, + AsyncSchemaRegistryResource, + SchemaRegistryResourceWithRawResponse, + AsyncSchemaRegistryResourceWithRawResponse, + SchemaRegistryResourceWithStreamingResponse, + AsyncSchemaRegistryResourceWithStreamingResponse, +) + +__all__ = [ + "SchemaRegistryResource", + "AsyncSchemaRegistryResource", + "SchemaRegistryResourceWithRawResponse", + "AsyncSchemaRegistryResourceWithRawResponse", + "SchemaRegistryResourceWithStreamingResponse", + "AsyncSchemaRegistryResourceWithStreamingResponse", + "DatabasesResource", + "AsyncDatabasesResource", + "DatabasesResourceWithRawResponse", + "AsyncDatabasesResourceWithRawResponse", + "DatabasesResourceWithStreamingResponse", + "AsyncDatabasesResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/databases/databases.py b/src/do_gradientai/resources/databases/databases.py new file mode 100644 index 00000000..e1f990d5 --- /dev/null +++ b/src/do_gradientai/resources/databases/databases.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
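+# Usage sketch (illustrative comment, not generated code): this module only nests
+# subresources, so calls are reached through the client as, e.g.,
+#
+#     client.databases.schema_registry.config.retrieve("<cluster-uuid>")
+#
+# where `client` is assumed to be a configured instance of this SDK's client class.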
+ +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .schema_registry.schema_registry import ( + SchemaRegistryResource, + AsyncSchemaRegistryResource, + SchemaRegistryResourceWithRawResponse, + AsyncSchemaRegistryResourceWithRawResponse, + SchemaRegistryResourceWithStreamingResponse, + AsyncSchemaRegistryResourceWithStreamingResponse, +) + +__all__ = ["DatabasesResource", "AsyncDatabasesResource"] + + +class DatabasesResource(SyncAPIResource): + @cached_property + def schema_registry(self) -> SchemaRegistryResource: + return SchemaRegistryResource(self._client) + + @cached_property + def with_raw_response(self) -> DatabasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DatabasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DatabasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DatabasesResourceWithStreamingResponse(self) + + +class AsyncDatabasesResource(AsyncAPIResource): + @cached_property + def schema_registry(self) -> AsyncSchemaRegistryResource: + return AsyncSchemaRegistryResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncDatabasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDatabasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDatabasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDatabasesResourceWithStreamingResponse(self) + + +class DatabasesResourceWithRawResponse: + def __init__(self, databases: DatabasesResource) -> None: + self._databases = databases + + @cached_property + def schema_registry(self) -> SchemaRegistryResourceWithRawResponse: + return SchemaRegistryResourceWithRawResponse(self._databases.schema_registry) + + +class AsyncDatabasesResourceWithRawResponse: + def __init__(self, databases: AsyncDatabasesResource) -> None: + self._databases = databases + + @cached_property + def schema_registry(self) -> AsyncSchemaRegistryResourceWithRawResponse: + return AsyncSchemaRegistryResourceWithRawResponse(self._databases.schema_registry) + + +class DatabasesResourceWithStreamingResponse: + def __init__(self, databases: DatabasesResource) -> None: + self._databases = databases + + @cached_property + def schema_registry(self) -> SchemaRegistryResourceWithStreamingResponse: + return SchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry) + + +class AsyncDatabasesResourceWithStreamingResponse: + def __init__(self, databases: AsyncDatabasesResource) -> None: + self._databases = databases + + @cached_property + def schema_registry(self) -> AsyncSchemaRegistryResourceWithStreamingResponse: + return AsyncSchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry) diff --git a/src/do_gradientai/resources/databases/schema_registry/__init__.py b/src/do_gradientai/resources/databases/schema_registry/__init__.py new file mode 100644 index 00000000..2015e4d4 --- /dev/null +++ b/src/do_gradientai/resources/databases/schema_registry/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .config import ( + ConfigResource, + AsyncConfigResource, + ConfigResourceWithRawResponse, + AsyncConfigResourceWithRawResponse, + ConfigResourceWithStreamingResponse, + AsyncConfigResourceWithStreamingResponse, +) +from .schema_registry import ( + SchemaRegistryResource, + AsyncSchemaRegistryResource, + SchemaRegistryResourceWithRawResponse, + AsyncSchemaRegistryResourceWithRawResponse, + SchemaRegistryResourceWithStreamingResponse, + AsyncSchemaRegistryResourceWithStreamingResponse, +) + +__all__ = [ + "ConfigResource", + "AsyncConfigResource", + "ConfigResourceWithRawResponse", + "AsyncConfigResourceWithRawResponse", + "ConfigResourceWithStreamingResponse", + "AsyncConfigResourceWithStreamingResponse", + "SchemaRegistryResource", + "AsyncSchemaRegistryResource", + "SchemaRegistryResourceWithRawResponse", + "AsyncSchemaRegistryResourceWithRawResponse", + "SchemaRegistryResourceWithStreamingResponse", + "AsyncSchemaRegistryResourceWithStreamingResponse", +] diff --git a/src/do_gradientai/resources/databases/schema_registry/config.py b/src/do_gradientai/resources/databases/schema_registry/config.py new file mode 100644 index 00000000..a815b84e --- /dev/null +++ b/src/do_gradientai/resources/databases/schema_registry/config.py @@ -0,0 +1,506 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
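+# Usage sketch (illustrative comment; the subject name and cluster UUID below are
+# placeholders):
+#
+#     client.databases.schema_registry.config.update_subject(
+#         "my-subject",
+#         database_cluster_uuid="<cluster-uuid>",
+#         compatibility_level="FULL",
+#     )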
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.databases.schema_registry import config_update_params, config_update_subject_params
+from ....types.databases.schema_registry.config_update_response import ConfigUpdateResponse
+from ....types.databases.schema_registry.config_retrieve_response import ConfigRetrieveResponse
+from ....types.databases.schema_registry.config_update_subject_response import ConfigUpdateSubjectResponse
+from ....types.databases.schema_registry.config_retrieve_subject_response import ConfigRetrieveSubjectResponse
+
+__all__ = ["ConfigResource", "AsyncConfigResource"]
+
+
+class ConfigResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ConfigResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        """
+        return ConfigResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ConfigResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        """
+        return ConfigResourceWithStreamingResponse(self)
+
+    def retrieve(
+        self,
+        database_cluster_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConfigRetrieveResponse:
+        """
+        To retrieve the Schema Registry configuration for a Kafka cluster, send a GET
+        request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+        a JSON object with a `compatibility_level` key, which is set to the current
+        compatibility level of the schema registry.
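+
+        Example (a minimal sketch; the cluster UUID is a placeholder, and the field
+        access assumes `ConfigRetrieveResponse` mirrors the response shape):
+
+            config = client.databases.schema_registry.config.retrieve(
+                "00000000-0000-0000-0000-000000000000",
+            )
+            print(config.compatibility_level)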
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + return self._get( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigRetrieveResponse, + ) + + def update( + self, + database_cluster_uuid: str, + *, + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigUpdateResponse: + """ + To update the Schema Registry configuration for a Kafka cluster, send a PUT + request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is + a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + compatibility_level: The compatibility level of the schema registry. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + return self._put( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config", + body=maybe_transform({"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigUpdateResponse, + ) + + def retrieve_subject( + self, + subject_name: str, + *, + database_cluster_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigRetrieveSubjectResponse: + """ + To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, + send a GET request to + `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. 
The response + is a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + if not subject_name: + raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}") + return self._get( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigRetrieveSubjectResponse, + ) + + def update_subject( + self, + subject_name: str, + *, + database_cluster_uuid: str, + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigUpdateSubjectResponse: + """ + To update the Schema Registry configuration for a Subject of a Kafka cluster, + send a PUT request to + `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response + is a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + compatibility_level: The compatibility level of the schema registry. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + if not subject_name: + raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}") + return self._put( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}", + body=maybe_transform( + {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigUpdateSubjectResponse, + ) + + +class AsyncConfigResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncConfigResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncConfigResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncConfigResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncConfigResourceWithStreamingResponse(self) + + async def retrieve( + self, + database_cluster_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigRetrieveResponse: + """ + To retrieve the Schema Registry configuration for a Kafka cluster, send a GET + request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is + a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + return await self._get( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigRetrieveResponse, + ) + + async def update( + self, + database_cluster_uuid: str, + *, + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigUpdateResponse: + """ + To update the Schema Registry configuration for a Kafka cluster, send a PUT + request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is + a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + compatibility_level: The compatibility level of the schema registry. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + return await self._put( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config", + body=await async_maybe_transform( + {"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigUpdateResponse, + ) + + async def retrieve_subject( + self, + subject_name: str, + *, + database_cluster_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigRetrieveSubjectResponse: + """ + To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, + send a GET request to + `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response + is a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + if not subject_name: + raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}") + return await self._get( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigRetrieveSubjectResponse, + ) + + async def update_subject( + self, + subject_name: str, + *, + database_cluster_uuid: str, + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConfigUpdateSubjectResponse: + """ + To update the Schema Registry configuration for a Subject of a Kafka cluster, + send a PUT request to + `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response + is a JSON object with a `compatibility_level` key, which is set to an object + containing any database configuration parameters. + + Args: + compatibility_level: The compatibility level of the schema registry. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not database_cluster_uuid: + raise ValueError( + f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}" + ) + if not subject_name: + raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}") + return await self._put( + f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}", + body=await async_maybe_transform( + {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConfigUpdateSubjectResponse, + ) + + +class ConfigResourceWithRawResponse: + def __init__(self, config: ConfigResource) -> None: + self._config = config + + self.retrieve = to_raw_response_wrapper( + config.retrieve, + ) + self.update = to_raw_response_wrapper( + config.update, + ) + self.retrieve_subject = to_raw_response_wrapper( + config.retrieve_subject, + ) + self.update_subject = to_raw_response_wrapper( + config.update_subject, + ) + + +class AsyncConfigResourceWithRawResponse: + def __init__(self, config: AsyncConfigResource) -> None: + self._config = config + + self.retrieve = async_to_raw_response_wrapper( + config.retrieve, + ) + self.update = async_to_raw_response_wrapper( + config.update, + ) + self.retrieve_subject = async_to_raw_response_wrapper( + config.retrieve_subject, + ) + self.update_subject = async_to_raw_response_wrapper( + config.update_subject, + ) + + +class ConfigResourceWithStreamingResponse: + def __init__(self, config: ConfigResource) -> None: + self._config = config + + self.retrieve = to_streamed_response_wrapper( + config.retrieve, + ) + self.update = to_streamed_response_wrapper( + config.update, + ) + self.retrieve_subject = to_streamed_response_wrapper( + config.retrieve_subject, + ) + self.update_subject = to_streamed_response_wrapper( + config.update_subject, + ) + + +class AsyncConfigResourceWithStreamingResponse: + def __init__(self, config: AsyncConfigResource) -> None: + self._config = config + + self.retrieve = async_to_streamed_response_wrapper( + config.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + config.update, + ) + self.retrieve_subject = async_to_streamed_response_wrapper( + config.retrieve_subject, + ) + self.update_subject = async_to_streamed_response_wrapper( + config.update_subject, + ) diff --git 
a/src/do_gradientai/resources/databases/schema_registry/schema_registry.py b/src/do_gradientai/resources/databases/schema_registry/schema_registry.py new file mode 100644 index 00000000..6a0a44fb --- /dev/null +++ b/src/do_gradientai/resources/databases/schema_registry/schema_registry.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .config import ( + ConfigResource, + AsyncConfigResource, + ConfigResourceWithRawResponse, + AsyncConfigResourceWithRawResponse, + ConfigResourceWithStreamingResponse, + AsyncConfigResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["SchemaRegistryResource", "AsyncSchemaRegistryResource"] + + +class SchemaRegistryResource(SyncAPIResource): + @cached_property + def config(self) -> ConfigResource: + return ConfigResource(self._client) + + @cached_property + def with_raw_response(self) -> SchemaRegistryResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SchemaRegistryResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SchemaRegistryResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return SchemaRegistryResourceWithStreamingResponse(self) + + +class AsyncSchemaRegistryResource(AsyncAPIResource): + @cached_property + def config(self) -> AsyncConfigResource: + return AsyncConfigResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncSchemaRegistryResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSchemaRegistryResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSchemaRegistryResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSchemaRegistryResourceWithStreamingResponse(self) + + +class SchemaRegistryResourceWithRawResponse: + def __init__(self, schema_registry: SchemaRegistryResource) -> None: + self._schema_registry = schema_registry + + @cached_property + def config(self) -> ConfigResourceWithRawResponse: + return ConfigResourceWithRawResponse(self._schema_registry.config) + + +class AsyncSchemaRegistryResourceWithRawResponse: + def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None: + self._schema_registry = schema_registry + + @cached_property + def config(self) -> AsyncConfigResourceWithRawResponse: + return AsyncConfigResourceWithRawResponse(self._schema_registry.config) + + +class SchemaRegistryResourceWithStreamingResponse: + def __init__(self, schema_registry: SchemaRegistryResource) -> None: + self._schema_registry = schema_registry + + @cached_property + def config(self) -> ConfigResourceWithStreamingResponse: + return ConfigResourceWithStreamingResponse(self._schema_registry.config) + + +class AsyncSchemaRegistryResourceWithStreamingResponse: + def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None: + self._schema_registry = schema_registry + + @cached_property + def config(self) -> AsyncConfigResourceWithStreamingResponse: + return AsyncConfigResourceWithStreamingResponse(self._schema_registry.config) diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py index 41f2eabd..3800c03c 100644 --- a/src/do_gradientai/resources/models/models.py +++ b/src/do_gradientai/resources/models/models.py @@ -2,9 +2,14 @@ from __future__ import annotations +from typing import List +from typing_extensions import Literal + import httpx +from ...types import model_list_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -23,7 +28,6 @@ AsyncProvidersResourceWithStreamingResponse, ) from ...types.model_list_response import ModelListResponse -from ...types.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -52,22 +56,52 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def retrieve( + def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. 
+ To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model may be used for reasoning + - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -76,36 +110,24 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelRetrieveResponse, - ) - - def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. - """ - return self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -135,22 +157,52 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def retrieve( + async def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. + To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model may be used for reasoning + - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -159,36 +211,24 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelRetrieveResponse, - ) - - async def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability.
- """ - return await self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -198,9 +238,6 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) self.list = to_raw_response_wrapper( models.list, ) @@ -214,9 +251,6 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -230,9 +264,6 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) self.list = to_streamed_response_wrapper( models.list, ) @@ -246,9 +277,6 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py index 0e08e399..21266ef5 100644 --- a/src/do_gradientai/types/__init__.py +++ b/src/do_gradientai/types/__init__.py @@ -50,6 +50,7 @@ from .api_agent_model import APIAgentModel as APIAgentModel from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion +from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams @@ -66,7 +67,6 @@ from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams -from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse @@ -113,6 +113,12 @@ agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.model_rebuild(_parent_namespace_depth=0) agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.model_rebuild(_parent_namespace_depth=0) agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.model_rebuild(_parent_namespace_depth=0) + agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.model_rebuild( + _parent_namespace_depth=0 + ) + 
agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.model_rebuild( + _parent_namespace_depth=0 + ) agents.function_create_response.FunctionCreateResponse.model_rebuild(_parent_namespace_depth=0) agents.function_update_response.FunctionUpdateResponse.model_rebuild(_parent_namespace_depth=0) agents.function_delete_response.FunctionDeleteResponse.model_rebuild(_parent_namespace_depth=0) @@ -137,6 +143,8 @@ agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py new file mode 100644 index 00000000..eb47e709 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams +from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py new file mode 100644 index 00000000..55f44139 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + """Anthropic API key""" + + name: str + """Name of the key""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py new file mode 100644 index 00000000..24b7bbb2 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py new file mode 100644 index 00000000..b5d8584e --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py new file mode 100644 index 00000000..566c39f7 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListAgentsParams"] + + +class KeyListAgentsParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py new file mode 100644 index 00000000..633211cc --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ....._models import BaseModel +from ....shared.api_meta import APIMeta +from ....shared.api_links import APILinks + +__all__ = ["KeyListAgentsResponse"] + + +class KeyListAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + + +from ....api_agent import APIAgent diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py new file mode 100644 index 00000000..1611dc03 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
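+# A minimal usage sketch for these params (not part of the generated module;
+# a configured `GradientAI` client named `client` is assumed, and the
+# `keys.list` method is inferred from these params rather than shown here):
+#
+#     keys = client.agents.evaluation_metrics.anthropic.keys.list(page=1, per_page=20)
+#     for info in keys.api_key_infos or []:
+#         print(info)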
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py new file mode 100644 index 00000000..edc9e75a --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ....._models import BaseModel +from ....shared.api_meta import APIMeta +from ....shared.api_links import APILinks +from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None + """Api key infos""" + + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py new file mode 100644 index 00000000..a100ec29 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py new file mode 100644 index 00000000..0d542bbb --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + """Anthropic API key""" + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" + + name: str + """Name of the key""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py new file mode 100644 index 00000000..06fa2d18 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
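+# A minimal sketch of consuming this response model (the exact `keys.update`
+# signature lives in the keys resource; the call shape and key ID below are
+# assumptions):
+#
+#     resp = client.agents.evaluation_metrics.anthropic.keys.update(
+#         "api_key_uuid",  # hypothetical key ID
+#         name="Rotated Key",
+#     )
+#     if resp.api_key_info is not None:
+#         print(resp.api_key_info)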
+ +from typing import Optional + +from ....._models import BaseModel +from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py new file mode 100644 index 00000000..eb47e709 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams +from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py new file mode 100644 index 00000000..5f4975dd --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + """OpenAI API key""" + + name: str + """Name of the key""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py new file mode 100644 index 00000000..4af7b872 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py new file mode 100644 index 00000000..f1ebc73a --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
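+# A minimal sketch of consuming this response model (a configured client is
+# assumed, and `keys.delete` taking the key ID positionally is an assumption):
+#
+#     deleted = client.agents.evaluation_metrics.openai.keys.delete(
+#         "api_key_uuid",  # hypothetical key ID
+#     )
+#     info = deleted.api_key_info  # may be None; guard before use
+#     print(info if info is not None else "no key info returned")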
+ +from typing import Optional + +from ....._models import BaseModel +from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py new file mode 100644 index 00000000..566c39f7 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListAgentsParams"] + + +class KeyListAgentsParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py new file mode 100644 index 00000000..633211cc --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +from ....._models import BaseModel +from ....shared.api_meta import APIMeta +from ....shared.api_links import APILinks + +__all__ = ["KeyListAgentsResponse"] + + +class KeyListAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + + +from ....api_agent import APIAgent diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py new file mode 100644 index 00000000..1611dc03 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py new file mode 100644 index 00000000..00738f68 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
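+# A minimal pagination sketch over this response model (a configured client is
+# assumed; `page`/`per_page` mirror KeyListParams in this package):
+#
+#     page = 1
+#     while True:
+#         resp = client.agents.evaluation_metrics.openai.keys.list(page=page, per_page=50)
+#         infos = resp.api_key_infos or []
+#         for info in infos:
+#             print(info)
+#         if len(infos) < 50:  # short page: nothing further to fetch
+#             break
+#         page += 1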
+ +from typing import List, Optional + +from ....._models import BaseModel +from ....shared.api_meta import APIMeta +from ....shared.api_links import APILinks +from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + """Api key infos""" + + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py new file mode 100644 index 00000000..9ba42cd2 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py new file mode 100644 index 00000000..3960cf36 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ....._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + """OpenAI API key""" + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" + + name: str + """Name of the key""" diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py new file mode 100644 index 00000000..222a8416 --- /dev/null +++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel +from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/do_gradientai/types/databases/__init__.py b/src/do_gradientai/types/databases/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/do_gradientai/types/databases/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/do_gradientai/types/databases/schema_registry/__init__.py b/src/do_gradientai/types/databases/schema_registry/__init__.py new file mode 100644 index 00000000..92c4e7a5 --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
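+# A minimal sketch of using the exported request types for static typing
+# (nothing here is required at runtime; the dict literal is checked against
+# the TypedDict by a type checker such as mypy):
+#
+#     from do_gradientai.types.databases.schema_registry import ConfigUpdateParams
+#
+#     params: ConfigUpdateParams = {"compatibility_level": "FULL"}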
+ +from __future__ import annotations + +from .config_update_params import ConfigUpdateParams as ConfigUpdateParams +from .config_update_response import ConfigUpdateResponse as ConfigUpdateResponse +from .config_retrieve_response import ConfigRetrieveResponse as ConfigRetrieveResponse +from .config_update_subject_params import ConfigUpdateSubjectParams as ConfigUpdateSubjectParams +from .config_update_subject_response import ConfigUpdateSubjectResponse as ConfigUpdateSubjectResponse +from .config_retrieve_subject_response import ConfigRetrieveSubjectResponse as ConfigRetrieveSubjectResponse diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py b/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py new file mode 100644 index 00000000..583e4eec --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConfigRetrieveResponse"] + + +class ConfigRetrieveResponse(BaseModel): + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ] + """The compatibility level of the schema registry.""" diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py b/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py new file mode 100644 index 00000000..ec9fea68 --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConfigRetrieveSubjectResponse"] + + +class ConfigRetrieveSubjectResponse(BaseModel): + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ] + """The compatibility level of the schema registry.""" + + subject_name: str + """The name of the schema subject.""" diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_params.py b/src/do_gradientai/types/databases/schema_registry/config_update_params.py new file mode 100644 index 00000000..b25c7e92 --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_update_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConfigUpdateParams"] + + +class ConfigUpdateParams(TypedDict, total=False): + compatibility_level: Required[ + Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"] + ] + """The compatibility level of the schema registry.""" diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_response.py b/src/do_gradientai/types/databases/schema_registry/config_update_response.py new file mode 100644 index 00000000..0df776af --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_update_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConfigUpdateResponse"] + + +class ConfigUpdateResponse(BaseModel): + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ] + """The compatibility level of the schema registry.""" diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py b/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py new file mode 100644 index 00000000..b935ba80 --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConfigUpdateSubjectParams"] + + +class ConfigUpdateSubjectParams(TypedDict, total=False): + database_cluster_uuid: Required[str] + + compatibility_level: Required[ + Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"] + ] + """The compatibility level of the schema registry.""" diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py b/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py new file mode 100644 index 00000000..3bb3cd24 --- /dev/null +++ b/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConfigUpdateSubjectResponse"] + + +class ConfigUpdateSubjectResponse(BaseModel): + compatibility_level: Literal[ + "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE" + ] + """The compatibility level of the schema registry.""" + + subject_name: str + """The name of the schema subject.""" diff --git a/src/do_gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py new file mode 100644 index 00000000..a2fa066a --- /dev/null +++ b/src/do_gradientai/types/model_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +__all__ = ["ModelListParams"] + + +class ModelListParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" + + public_only: bool + """Only include models that are publicly available.""" + + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + """Include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model may be used in an agent + - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails + - MODEL_USECASE_REASONING: The model may be used for reasoning + - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference + """ diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py index 5915bdd1..12d95437 100644 --- a/src/do_gradientai/types/model_list_response.py +++ b/src/do_gradientai/types/model_list_response.py @@ -1,28 +1,21 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List -from typing_extensions import Literal +from typing import List, Optional from .._models import BaseModel +from .api_model import APIModel +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks -__all__ = ["ModelListResponse", "Data"] - - -class Data(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - data: List[Data] + links: Optional[APILinks] = None + """Links to other pages.""" + + meta: Optional[APIMeta] = None + """Meta information about the data set.""" - object: Literal["list"] + models: Optional[List[APIModel]] = None + """The models.""" diff --git a/src/do_gradientai/types/model_retrieve_response.py b/src/do_gradientai/types/model_retrieve_response.py deleted file mode 100644 index dd5de863..00000000 --- a/src/do_gradientai/types/model_retrieve_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ModelRetrieveResponse"] - - -class ModelRetrieveResponse(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py b/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py new file mode 100644 index 00000000..aff153a6 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py @@ -0,0 +1,557 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
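+#
+# A minimal sketch of the ModelListParams filters introduced above, assuming a
+# configured client; the page/per_page/usecase values are illustrative only
+# and mirror the calls exercised in tests/api_resources/test_models.py:
+#
+#     from do_gradientai import GradientAI
+#
+#     client = GradientAI()
+#     resp = client.models.list(
+#         page=1,
+#         per_page=25,
+#         public_only=True,
+#         usecases=["MODEL_USECASE_AGENT"],
+#     )
+#     for model in resp.models or []:
+#         ...  # each entry is an APIModel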
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics.anthropic import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.create( + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def 
test_method_update(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.delete( + "api_key_uuid", + ) + 
assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.anthropic.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.create() + 
assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.create( + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/agents/evaluation_metrics/openai/__init__.py b/tests/api_resources/agents/evaluation_metrics/openai/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/openai/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI 
spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py new file mode 100644 index 00000000..08404acc --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py @@ -0,0 +1,557 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics.openai import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.create( + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + 
assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.agents.evaluation_metrics.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + key = client.agents.evaluation_metrics.openai.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + 
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.create( + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.delete( + "api_key_uuid", + ) + 
assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/databases/__init__.py b/tests/api_resources/databases/__init__.py new file mode 100644 index 00000000..fd8019a9 
--- /dev/null +++ b/tests/api_resources/databases/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/databases/schema_registry/__init__.py b/tests/api_resources/databases/schema_registry/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/databases/schema_registry/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py new file mode 100644 index 00000000..f63d62c0 --- /dev/null +++ b/tests/api_resources/databases/schema_registry/test_config.py @@ -0,0 +1,423 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from tests.utils import assert_matches_type +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.databases.schema_registry import ( + ConfigUpdateResponse, + ConfigRetrieveResponse, + ConfigUpdateSubjectResponse, + ConfigRetrieveSubjectResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestConfig: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + config = client.databases.schema_registry.config.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(ConfigRetrieveResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.databases.schema_registry.config.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = response.parse() + assert_matches_type(ConfigRetrieveResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.databases.schema_registry.config.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = response.parse() + assert_matches_type(ConfigRetrieveResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + client.databases.schema_registry.config.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + config = client.databases.schema_registry.config.update( + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.databases.schema_registry.config.with_raw_response.update( + 
database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = response.parse() + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.databases.schema_registry.config.with_streaming_response.update( + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = response.parse() + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + client.databases.schema_registry.config.with_raw_response.update( + database_cluster_uuid="", + compatibility_level="BACKWARD", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_subject(self, client: GradientAI) -> None: + config = client.databases.schema_registry.config.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_subject(self, client: GradientAI) -> None: + response = client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = response.parse() + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_subject(self, client: GradientAI) -> None: + with client.databases.schema_registry.config.with_streaming_response.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = response.parse() + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_subject(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"): + client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_subject(self, client: GradientAI) -> None: + config = client.databases.schema_registry.config.update_subject( + 
subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_subject(self, client: GradientAI) -> None: + response = client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = response.parse() + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_subject(self, client: GradientAI) -> None: + with client.databases.schema_registry.config.with_streaming_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = response.parse() + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_subject(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="", + compatibility_level="BACKWARD", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"): + client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + + +class TestAsyncConfig: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + config = await async_client.databases.schema_registry.config.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(ConfigRetrieveResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.databases.schema_registry.config.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = await response.parse() + assert_matches_type(ConfigRetrieveResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.databases.schema_registry.config.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = await response.parse() + assert_matches_type(ConfigRetrieveResponse, config, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + config = await async_client.databases.schema_registry.config.update( + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.databases.schema_registry.config.with_raw_response.update( + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = await response.parse() + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.databases.schema_registry.config.with_streaming_response.update( + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = await response.parse() + assert_matches_type(ConfigUpdateResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.update( + database_cluster_uuid="", + compatibility_level="BACKWARD", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + config = await async_client.databases.schema_registry.config.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + response = await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = await response.parse() + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + async with async_client.databases.schema_registry.config.with_streaming_response.retrieve_subject( + subject_name="customer-schema", + 
database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = await response.parse() + assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="customer-schema", + database_cluster_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject( + subject_name="", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_subject(self, async_client: AsyncGradientAI) -> None: + config = await async_client.databases.schema_registry.config.update_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_subject(self, async_client: AsyncGradientAI) -> None: + response = await async_client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + config = await response.parse() + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_subject(self, async_client: AsyncGradientAI) -> None: + async with async_client.databases.schema_registry.config.with_streaming_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + config = await response.parse() + assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_subject(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="customer-schema", + database_cluster_uuid="", + compatibility_level="BACKWARD", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"): + await async_client.databases.schema_registry.config.with_raw_response.update_subject( + subject_name="", + database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", + compatibility_level="BACKWARD", + ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 
index 803c5d5a..f7e21015 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -9,7 +9,7 @@
 from tests.utils import assert_matches_type
 from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import ModelListResponse, ModelRetrieveResponse
+from do_gradientai.types import ModelListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,50 +19,19 @@ class TestModels:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
-        model = client.models.retrieve(
-            "llama3-8b-instruct",
-        )
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+    def test_method_list(self, client: GradientAI) -> None:
+        model = client.models.list()
+        assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
-        response = client.models.with_raw_response.retrieve(
-            "llama3-8b-instruct",
+    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+        model = client.models.list(
+            page=0,
+            per_page=0,
+            public_only=True,
+            usecases=["MODEL_USECASE_UNKNOWN"],
         )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = response.parse()
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
-        with client.models.with_streaming_response.retrieve(
-            "llama3-8b-instruct",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = response.parse()
-            assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
-            client.models.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
-        model = client.models.list()
         assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()
@@ -95,50 +64,19 @@ class TestAsyncModels:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
-        model = await async_client.models.retrieve(
-            "llama3-8b-instruct",
-        )
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
+    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+        model = await async_client.models.list()
+        assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
-        response = await async_client.models.with_raw_response.retrieve(
-            "llama3-8b-instruct",
+    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+        model = await async_client.models.list(
+            page=0,
+            per_page=0,
+            public_only=True,
+            usecases=["MODEL_USECASE_UNKNOWN"],
         )
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = await response.parse()
-        assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
-        async with async_client.models.with_streaming_response.retrieve(
-            "llama3-8b-instruct",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = await response.parse()
-            assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
-            await async_client.models.with_raw_response.retrieve(
-                "",
-            )
-
-    @pytest.mark.skip()
-    @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
-        model = await async_client.models.list()
         assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()

From 635843721acfa49d0d7aaf35d9c39abc08615858 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 30 Jul 2025 19:05:02 +0000
Subject: [PATCH 140/200] feat(api): update to package gradient

---
 .github/workflows/publish-pypi.yml | 4 +-
 .github/workflows/release-doctor.yml | 4 +-
 .stats.yml | 2 +-
 CONTRIBUTING.md | 6 +-
 LICENSE | 2 +-
 README.md | 88 ++--
 SECURITY.md | 2 +-
 api.md | 460 +++++++++---------
 mypy.ini | 2 +-
 pyproject.toml | 18 +-
 release-please-config.json | 2 +-
 scripts/lint | 2 +-
 src/{do_gradientai => gradient}/__init__.py | 16 +-
 .../_base_client.py | 2 +-
 src/{do_gradientai => gradient}/_client.py | 60 +--
 src/{do_gradientai => gradient}/_compat.py | 0
 src/{do_gradientai => gradient}/_constants.py | 0
 .../_exceptions.py | 4 +-
 src/{do_gradientai => gradient}/_files.py | 0
 src/{do_gradientai => gradient}/_models.py | 0
 src/{do_gradientai => gradient}/_qs.py | 0
 src/{do_gradientai => gradient}/_resource.py | 10 +-
 src/{do_gradientai => gradient}/_response.py | 14 +-
 src/{do_gradientai => gradient}/_streaming.py | 6 +-
 src/{do_gradientai => gradient}/_types.py | 2 +-
 .../_utils/__init__.py | 0
 .../_utils/_logs.py | 6 +-
 .../_utils/_proxy.py | 0
 .../_utils/_reflection.py | 0
 .../_utils/_resources_proxy.py | 8 +-
 .../_utils/_streams.py | 0
 .../_utils/_sync.py | 0
 .../_utils/_transform.py | 0
 .../_utils/_typing.py | 0
 .../_utils/_utils.py | 0
 src/{do_gradientai => gradient}/_version.py | 2 +-
 src/gradient/lib/.keep | 4 +
 src/{do_gradientai => gradient}/py.typed | 0
 .../resources/__init__.py | 0
 .../resources/agents/__init__.py | 0
 .../resources/agents/agents.py | 8 +-
 .../resources/agents/api_keys.py | 8 +-
 .../resources/agents/chat/__init__.py | 0
 .../resources/agents/chat/chat.py | 8 +-
 .../resources/agents/chat/completions.py | 8 +-
 .../resources/agents/evaluation_datasets.py | 8 +-
 .../agents/evaluation_metrics/__init__.py | 0
 .../evaluation_metrics/anthropic/__init__.py | 0
 .../evaluation_metrics/anthropic/anthropic.py | 8 +-
 .../evaluation_metrics/anthropic/keys.py | 8 +-
 .../evaluation_metrics/evaluation_metrics.py | 8 +-
 .../agents/evaluation_metrics/models.py | 8 +-
 .../evaluation_metrics/openai/__init__.py | 0
 .../agents/evaluation_metrics/openai/keys.py | 8 +-
 .../evaluation_metrics/openai/openai.py | 8 +-
 .../evaluation_metrics/workspaces/__init__.py | 0
 .../evaluation_metrics/workspaces/agents.py | 8 +-
 .../workspaces/workspaces.py | 8 +-
 .../resources/agents/evaluation_runs.py | 8 +-
 .../resources/agents/evaluation_test_cases.py | 8 +-
.../resources/agents/functions.py | 8 +- .../resources/agents/knowledge_bases.py | 8 +- .../resources/agents/routes.py | 8 +- .../resources/agents/versions.py | 8 +- .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 8 +- .../resources/chat/completions.py | 8 +- .../resources/databases/__init__.py | 0 .../resources/databases/databases.py | 8 +- .../databases/schema_registry/__init__.py | 0 .../databases/schema_registry/config.py | 8 +- .../schema_registry/schema_registry.py | 8 +- .../resources/gpu_droplets/__init__.py | 0 .../gpu_droplets/account/__init__.py | 0 .../resources/gpu_droplets/account/account.py | 8 +- .../resources/gpu_droplets/account/keys.py | 8 +- .../resources/gpu_droplets/actions.py | 8 +- .../resources/gpu_droplets/autoscale.py | 8 +- .../resources/gpu_droplets/backups.py | 8 +- .../destroy_with_associated_resources.py | 8 +- .../gpu_droplets/firewalls/__init__.py | 0 .../gpu_droplets/firewalls/droplets.py | 8 +- .../gpu_droplets/firewalls/firewalls.py | 8 +- .../resources/gpu_droplets/firewalls/rules.py | 8 +- .../resources/gpu_droplets/firewalls/tags.py | 8 +- .../gpu_droplets/floating_ips/__init__.py | 0 .../gpu_droplets/floating_ips/actions.py | 8 +- .../gpu_droplets/floating_ips/floating_ips.py | 8 +- .../resources/gpu_droplets/gpu_droplets.py | 8 +- .../resources/gpu_droplets/images/__init__.py | 0 .../resources/gpu_droplets/images/actions.py | 8 +- .../resources/gpu_droplets/images/images.py | 8 +- .../gpu_droplets/load_balancers/__init__.py | 0 .../gpu_droplets/load_balancers/droplets.py | 8 +- .../load_balancers/forwarding_rules.py | 8 +- .../load_balancers/load_balancers.py | 8 +- .../resources/gpu_droplets/sizes.py | 8 +- .../resources/gpu_droplets/snapshots.py | 8 +- .../gpu_droplets/volumes/__init__.py | 0 .../resources/gpu_droplets/volumes/actions.py | 8 +- .../gpu_droplets/volumes/snapshots.py | 8 +- .../resources/gpu_droplets/volumes/volumes.py | 8 +- .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 8 +- .../resources/inference/inference.py | 8 +- .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 8 +- .../knowledge_bases/indexing_jobs.py | 8 +- .../knowledge_bases/knowledge_bases.py | 8 +- .../resources/models/__init__.py | 0 .../resources/models/models.py | 8 +- .../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 8 +- .../resources/models/providers/openai.py | 8 +- .../resources/models/providers/providers.py | 8 +- .../resources/regions.py | 8 +- .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 
.../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 ...evaluation_metric_list_regions_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/anthropic/__init__.py | 0 .../anthropic/key_create_params.py | 0 .../anthropic/key_create_response.py | 0 .../anthropic/key_delete_response.py | 0 .../anthropic/key_list_agents_params.py | 0 .../anthropic/key_list_agents_response.py | 0 .../anthropic/key_list_params.py | 0 .../anthropic/key_list_response.py | 0 .../anthropic/key_retrieve_response.py | 0 .../anthropic/key_update_params.py | 0 .../anthropic/key_update_response.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../evaluation_metrics/openai/__init__.py | 0 .../openai/key_create_params.py | 0 .../openai/key_create_response.py | 0 .../openai/key_delete_response.py | 0 .../openai/key_list_agents_params.py | 0 .../openai/key_list_agents_response.py | 0 .../openai/key_list_params.py | 0 .../openai/key_list_response.py | 0 .../openai/key_retrieve_response.py | 0 .../openai/key_update_params.py | 0 .../openai/key_update_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 .../types/agents/route_view_response.py | 0 
.../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/databases/__init__.py | 0 .../databases/schema_registry/__init__.py | 0 .../config_retrieve_response.py | 0 .../config_retrieve_subject_response.py | 0 .../schema_registry/config_update_params.py | 0 .../schema_registry/config_update_response.py | 0 .../config_update_subject_params.py | 0 .../config_update_subject_response.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/gpu_droplet_create_params.py | 0 .../types/gpu_droplet_create_response.py | 0 .../types/gpu_droplet_delete_by_tag_params.py | 0 .../gpu_droplet_list_firewalls_params.py | 0 .../gpu_droplet_list_firewalls_response.py | 0 .../types/gpu_droplet_list_kernels_params.py | 0 .../gpu_droplet_list_kernels_response.py | 0 .../gpu_droplet_list_neighbors_response.py | 0 .../types/gpu_droplet_list_params.py | 0 .../types/gpu_droplet_list_response.py | 0 .../gpu_droplet_list_snapshots_params.py | 0 .../gpu_droplet_list_snapshots_response.py | 0 .../types/gpu_droplet_retrieve_response.py | 0 .../types/gpu_droplets/__init__.py | 0 .../types/gpu_droplets/account/__init__.py | 0 .../gpu_droplets/account/key_create_params.py | 0 .../account/key_create_response.py | 0 .../gpu_droplets/account/key_list_params.py | 0 .../gpu_droplets/account/key_list_response.py | 0 .../account/key_retrieve_response.py | 0 .../gpu_droplets/account/key_update_params.py | 0 .../account/key_update_response.py | 0 .../action_bulk_initiate_params.py | 0 .../action_bulk_initiate_response.py | 0 .../gpu_droplets/action_initiate_params.py | 0 .../gpu_droplets/action_initiate_response.py | 0 .../types/gpu_droplets/action_list_params.py | 0 .../gpu_droplets/action_list_response.py | 0 .../gpu_droplets/action_retrieve_response.py | 0 .../types/gpu_droplets/associated_resource.py | 0 .../gpu_droplets/autoscale_create_params.py | 0 .../gpu_droplets/autoscale_create_response.py | 0 .../autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../gpu_droplets/autoscale_list_params.py | 0 .../gpu_droplets/autoscale_list_response.py | 0 .../types/gpu_droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../autoscale_retrieve_response.py | 0 .../gpu_droplets/autoscale_update_params.py | 0 .../gpu_droplets/autoscale_update_response.py | 0 .../types/gpu_droplets/backup_list_params.py | 0 .../backup_list_policies_params.py | 0 .../backup_list_policies_response.py | 0 .../gpu_droplets/backup_list_response.py | 0 
...backup_list_supported_policies_response.py | 0 .../backup_retrieve_policy_response.py | 0 .../types/gpu_droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../destroyed_associated_resource.py | 0 .../types/gpu_droplets/domains.py | 0 .../types/gpu_droplets/domains_param.py | 0 .../types/gpu_droplets/firewall.py | 0 .../gpu_droplets/firewall_create_params.py | 0 .../gpu_droplets/firewall_create_response.py | 0 .../gpu_droplets/firewall_list_params.py | 0 .../gpu_droplets/firewall_list_response.py | 0 .../types/gpu_droplets/firewall_param.py | 0 .../firewall_retrieve_response.py | 0 .../gpu_droplets/firewall_update_params.py | 0 .../gpu_droplets/firewall_update_response.py | 0 .../types/gpu_droplets/firewalls/__init__.py | 0 .../firewalls/droplet_add_params.py | 0 .../firewalls/droplet_remove_params.py | 0 .../gpu_droplets/firewalls/rule_add_params.py | 0 .../firewalls/rule_remove_params.py | 0 .../gpu_droplets/firewalls/tag_add_params.py | 0 .../firewalls/tag_remove_params.py | 0 .../types/gpu_droplets/floating_ip.py | 0 .../gpu_droplets/floating_ip_create_params.py | 0 .../floating_ip_create_response.py | 0 .../gpu_droplets/floating_ip_list_params.py | 0 .../gpu_droplets/floating_ip_list_response.py | 0 .../floating_ip_retrieve_response.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/gpu_droplets/forwarding_rule.py | 0 .../gpu_droplets/forwarding_rule_param.py | 0 .../types/gpu_droplets/glb_settings.py | 0 .../types/gpu_droplets/glb_settings_param.py | 0 .../types/gpu_droplets/health_check.py | 0 .../types/gpu_droplets/health_check_param.py | 0 .../types/gpu_droplets/image_create_params.py | 0 .../gpu_droplets/image_create_response.py | 0 .../types/gpu_droplets/image_list_params.py | 0 .../types/gpu_droplets/image_list_response.py | 0 .../gpu_droplets/image_retrieve_response.py | 0 .../types/gpu_droplets/image_update_params.py | 0 .../gpu_droplets/image_update_response.py | 0 .../types/gpu_droplets/images/__init__.py | 0 .../images/action_create_params.py | 0 .../images/action_list_response.py | 0 .../types/gpu_droplets/lb_firewall.py | 0 .../types/gpu_droplets/lb_firewall_param.py | 0 .../types/gpu_droplets/load_balancer.py | 0 .../load_balancer_create_params.py | 0 .../load_balancer_create_response.py | 0 .../gpu_droplets/load_balancer_list_params.py | 0 .../load_balancer_list_response.py | 0 .../load_balancer_retrieve_response.py | 0 .../load_balancer_update_params.py | 0 .../load_balancer_update_response.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/gpu_droplets/size_list_params.py | 0 .../types/gpu_droplets/size_list_response.py | 0 .../gpu_droplets/snapshot_list_params.py | 0 .../gpu_droplets/snapshot_list_response.py | 0 .../snapshot_retrieve_response.py | 0 .../types/gpu_droplets/sticky_sessions.py | 0 .../gpu_droplets/sticky_sessions_param.py | 0 .../gpu_droplets/volume_create_params.py | 0 .../gpu_droplets/volume_create_response.py | 0 .../volume_delete_by_name_params.py | 0 .../types/gpu_droplets/volume_list_params.py | 0 
.../gpu_droplets/volume_list_response.py | 0 .../gpu_droplets/volume_retrieve_response.py | 0 .../types/gpu_droplets/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../volumes/action_list_params.py | 0 .../volumes/action_list_response.py | 0 .../volumes/action_retrieve_params.py | 0 .../volumes/action_retrieve_response.py | 0 .../volumes/snapshot_create_params.py | 0 .../volumes/snapshot_create_response.py | 0 .../volumes/snapshot_list_params.py | 0 .../volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../gpu_droplets/volumes/volume_action.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model_list_params.py | 0 .../types/model_list_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 .../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 .../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 
.../providers/openai_retrieve_response.py | 0 .../models/providers/openai_update_params.py | 0 .../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../agents/chat/test_completions.py | 36 +- .../evaluation_metrics/anthropic/test_keys.py | 108 ++-- .../evaluation_metrics/openai/test_keys.py | 108 ++-- .../agents/evaluation_metrics/test_models.py | 20 +- .../evaluation_metrics/test_workspaces.py | 100 ++-- .../workspaces/test_agents.py | 44 +- tests/api_resources/agents/test_api_keys.py | 96 ++-- .../agents/test_evaluation_datasets.py | 38 +- .../agents/test_evaluation_metrics.py | 32 +- .../agents/test_evaluation_runs.py | 72 +-- .../agents/test_evaluation_test_cases.py | 92 ++-- tests/api_resources/agents/test_functions.py | 60 +-- .../agents/test_knowledge_bases.py | 52 +- tests/api_resources/agents/test_routes.py | 76 +-- tests/api_resources/agents/test_versions.py | 47 +- tests/api_resources/chat/test_completions.py | 36 +- .../databases/schema_registry/test_config.py | 68 +-- .../gpu_droplets/account/test_keys.py | 72 +-- .../gpu_droplets/firewalls/test_droplets.py | 34 +- .../gpu_droplets/firewalls/test_rules.py | 42 +- .../gpu_droplets/firewalls/test_tags.py | 34 +- .../gpu_droplets/floating_ips/test_actions.py | 68 +-- .../gpu_droplets/images/test_actions.py | 54 +- .../load_balancers/test_droplets.py | 34 +- .../load_balancers/test_forwarding_rules.py | 34 +- .../gpu_droplets/test_actions.py | 204 ++++---- .../gpu_droplets/test_autoscale.py | 144 +++--- .../gpu_droplets/test_backups.py | 60 +-- .../test_destroy_with_associated_resources.py | 68 +-- .../gpu_droplets/test_firewalls.py | 88 ++-- .../gpu_droplets/test_floating_ips.py | 80 +-- .../api_resources/gpu_droplets/test_images.py | 76 +-- .../gpu_droplets/test_load_balancers.py | 140 +++--- .../api_resources/gpu_droplets/test_sizes.py | 20 +- .../gpu_droplets/test_snapshots.py | 44 +- .../gpu_droplets/test_volumes.py | 100 ++-- .../gpu_droplets/volumes/test_actions.py | 136 +++--- .../gpu_droplets/volumes/test_snapshots.py | 76 +-- .../api_resources/inference/test_api_keys.py | 88 ++-- .../knowledge_bases/test_data_sources.py | 60 +-- .../knowledge_bases/test_indexing_jobs.py | 88 ++-- .../models/providers/test_anthropic.py | 108 ++-- .../models/providers/test_openai.py | 108 ++-- tests/api_resources/test_agents.py | 108 ++-- tests/api_resources/test_gpu_droplets.py | 
148 +++--- tests/api_resources/test_knowledge_bases.py | 88 ++-- tests/api_resources/test_models.py | 20 +- tests/api_resources/test_regions.py | 20 +- tests/conftest.py | 14 +- tests/test_client.py | 202 ++++---- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 38 +- tests/test_streaming.py | 34 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 555 files changed, 2513 insertions(+), 2524 deletions(-) rename src/{do_gradientai => gradient}/__init__.py (90%) rename src/{do_gradientai => gradient}/_base_client.py (99%) rename src/{do_gradientai => gradient}/_client.py (95%) rename src/{do_gradientai => gradient}/_compat.py (100%) rename src/{do_gradientai => gradient}/_constants.py (100%) rename src/{do_gradientai => gradient}/_exceptions.py (97%) rename src/{do_gradientai => gradient}/_files.py (100%) rename src/{do_gradientai => gradient}/_models.py (100%) rename src/{do_gradientai => gradient}/_qs.py (100%) rename src/{do_gradientai => gradient}/_resource.py (80%) rename src/{do_gradientai => gradient}/_response.py (98%) rename src/{do_gradientai => gradient}/_streaming.py (99%) rename src/{do_gradientai => gradient}/_types.py (99%) rename src/{do_gradientai => gradient}/_utils/__init__.py (100%) rename src/{do_gradientai => gradient}/_utils/_logs.py (70%) rename src/{do_gradientai => gradient}/_utils/_proxy.py (100%) rename src/{do_gradientai => gradient}/_utils/_reflection.py (100%) rename src/{do_gradientai => gradient}/_utils/_resources_proxy.py (50%) rename src/{do_gradientai => gradient}/_utils/_streams.py (100%) rename src/{do_gradientai => gradient}/_utils/_sync.py (100%) rename src/{do_gradientai => gradient}/_utils/_transform.py (100%) rename src/{do_gradientai => gradient}/_utils/_typing.py (100%) rename src/{do_gradientai => gradient}/_utils/_utils.py (100%) rename src/{do_gradientai => gradient}/_version.py (83%) create mode 100644 src/gradient/lib/.keep rename src/{do_gradientai => gradient}/py.typed (100%) rename src/{do_gradientai => gradient}/resources/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/agents.py (99%) rename src/{do_gradientai => gradient}/resources/agents/api_keys.py (99%) rename src/{do_gradientai => gradient}/resources/agents/chat/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/chat/chat.py (93%) rename src/{do_gradientai => gradient}/resources/agents/chat/completions.py (99%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_datasets.py (98%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/anthropic/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/anthropic/anthropic.py (93%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/anthropic/keys.py (99%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/evaluation_metrics.py (98%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/models.py (97%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/openai/__init__.py (100%) rename src/{do_gradientai => 
gradient}/resources/agents/evaluation_metrics/openai/keys.py (99%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/openai/openai.py (93%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/workspaces/agents.py (98%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_metrics/workspaces/workspaces.py (99%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_runs.py (98%) rename src/{do_gradientai => gradient}/resources/agents/evaluation_test_cases.py (99%) rename src/{do_gradientai => gradient}/resources/agents/functions.py (98%) rename src/{do_gradientai => gradient}/resources/agents/knowledge_bases.py (98%) rename src/{do_gradientai => gradient}/resources/agents/routes.py (99%) rename src/{do_gradientai => gradient}/resources/agents/versions.py (98%) rename src/{do_gradientai => gradient}/resources/chat/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/chat/chat.py (93%) rename src/{do_gradientai => gradient}/resources/chat/completions.py (99%) rename src/{do_gradientai => gradient}/resources/databases/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/databases/databases.py (94%) rename src/{do_gradientai => gradient}/resources/databases/schema_registry/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/databases/schema_registry/config.py (99%) rename src/{do_gradientai => gradient}/resources/databases/schema_registry/schema_registry.py (94%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/account/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/account/account.py (93%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/account/keys.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/actions.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/autoscale.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/backups.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/destroy_with_associated_resources.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/firewalls/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/firewalls/droplets.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/firewalls/firewalls.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/firewalls/rules.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/firewalls/tags.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/floating_ips/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/floating_ips/actions.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/floating_ips/floating_ips.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/gpu_droplets.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/images/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/images/actions.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/images/images.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/load_balancers/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/load_balancers/droplets.py (98%) 
rename src/{do_gradientai => gradient}/resources/gpu_droplets/load_balancers/forwarding_rules.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/load_balancers/load_balancers.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/sizes.py (96%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/snapshots.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/volumes/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/volumes/actions.py (99%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/volumes/snapshots.py (98%) rename src/{do_gradientai => gradient}/resources/gpu_droplets/volumes/volumes.py (99%) rename src/{do_gradientai => gradient}/resources/inference/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/inference/api_keys.py (99%) rename src/{do_gradientai => gradient}/resources/inference/inference.py (94%) rename src/{do_gradientai => gradient}/resources/knowledge_bases/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/knowledge_bases/data_sources.py (98%) rename src/{do_gradientai => gradient}/resources/knowledge_bases/indexing_jobs.py (99%) rename src/{do_gradientai => gradient}/resources/knowledge_bases/knowledge_bases.py (99%) rename src/{do_gradientai => gradient}/resources/models/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/models/models.py (97%) rename src/{do_gradientai => gradient}/resources/models/providers/__init__.py (100%) rename src/{do_gradientai => gradient}/resources/models/providers/anthropic.py (99%) rename src/{do_gradientai => gradient}/resources/models/providers/openai.py (99%) rename src/{do_gradientai => gradient}/resources/models/providers/providers.py (95%) rename src/{do_gradientai => gradient}/resources/regions.py (96%) rename src/{do_gradientai => gradient}/types/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agent_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agent_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agent_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agent_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agent_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agent_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agent_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agent_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agent_update_status_params.py (100%) rename src/{do_gradientai => gradient}/types/agent_update_status_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_evaluation_metric.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_evaluation_metric_result.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_evaluation_prompt.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_evaluation_run.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_evaluation_test_case.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_list_params.py (100%) rename src/{do_gradientai => 
gradient}/types/agents/api_key_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_regenerate_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_key_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_link_knowledge_base_output.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_star_metric.py (100%) rename src/{do_gradientai => gradient}/types/agents/api_star_metric_param.py (100%) rename src/{do_gradientai => gradient}/types/agents/chat/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/chat/completion_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/chat/completion_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_dataset_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_dataset_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metric_list_regions_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metric_list_regions_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metric_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/anthropic/key_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/model_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/model_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_delete_response.py (100%) rename src/{do_gradientai => 
gradient}/types/agents/evaluation_metrics/openai/key_list_agents_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_list_agents_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/openai/key_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspace_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_list_results_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_list_results_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_run_retrieve_results_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_retrieve_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/evaluation_test_case_update_params.py (100%) rename 
src/{do_gradientai => gradient}/types/agents/evaluation_test_case_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/function_create_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/function_create_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/function_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/function_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/function_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/knowledge_base_detach_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_add_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_add_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_delete_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_update_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/route_view_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/version_list_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/version_list_response.py (100%) rename src/{do_gradientai => gradient}/types/agents/version_update_params.py (100%) rename src/{do_gradientai => gradient}/types/agents/version_update_response.py (100%) rename src/{do_gradientai => gradient}/types/api_agent.py (100%) rename src/{do_gradientai => gradient}/types/api_agent_api_key_info.py (100%) rename src/{do_gradientai => gradient}/types/api_agent_model.py (100%) rename src/{do_gradientai => gradient}/types/api_agreement.py (100%) rename src/{do_gradientai => gradient}/types/api_anthropic_api_key_info.py (100%) rename src/{do_gradientai => gradient}/types/api_deployment_visibility.py (100%) rename src/{do_gradientai => gradient}/types/api_knowledge_base.py (100%) rename src/{do_gradientai => gradient}/types/api_model.py (100%) rename src/{do_gradientai => gradient}/types/api_model_version.py (100%) rename src/{do_gradientai => gradient}/types/api_openai_api_key_info.py (100%) rename src/{do_gradientai => gradient}/types/api_retrieval_method.py (100%) rename src/{do_gradientai => gradient}/types/api_workspace.py (100%) rename src/{do_gradientai => gradient}/types/chat/__init__.py (100%) rename src/{do_gradientai => gradient}/types/chat/completion_create_params.py (100%) rename src/{do_gradientai => gradient}/types/chat/completion_create_response.py (100%) rename src/{do_gradientai => gradient}/types/databases/__init__.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/__init__.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_retrieve_subject_response.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_update_params.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_update_response.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_update_subject_params.py (100%) rename src/{do_gradientai => gradient}/types/databases/schema_registry/config_update_subject_response.py (100%) rename src/{do_gradientai => gradient}/types/droplet_backup_policy.py (100%) rename src/{do_gradientai => gradient}/types/droplet_backup_policy_param.py (100%) rename 
src/{do_gradientai => gradient}/types/gpu_droplet_create_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_create_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_delete_by_tag_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_firewalls_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_firewalls_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_kernels_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_kernels_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_neighbors_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_snapshots_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_list_snapshots_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplet_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/__init__.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/__init__.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_create_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_create_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_update_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/account/key_update_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_bulk_initiate_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_bulk_initiate_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_initiate_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_initiate_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/action_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/associated_resource.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_create_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_create_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_history_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_history_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_members_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_members_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool.py (100%) rename src/{do_gradientai => 
gradient}/types/gpu_droplets/autoscale_pool_droplet_template.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool_droplet_template_param.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool_dynamic_config.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool_dynamic_config_param.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool_static_config.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_pool_static_config_param.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_update_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/autoscale_update_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_list_policies_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_list_policies_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_list_supported_policies_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/backup_retrieve_policy_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/current_utilization.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/destroy_with_associated_resource_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/destroyed_associated_resource.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/domains.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/domains_param.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_create_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_create_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_list_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_list_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_param.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_retrieve_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_update_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewall_update_response.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/__init__.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/droplet_add_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/droplet_remove_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/rule_add_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/rule_remove_params.py (100%) rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/tag_add_params.py (100%) rename src/{do_gradientai => 
 rename src/{do_gradientai => gradient}/types/gpu_droplets/firewalls/tag_remove_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ip_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ips/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ips/action_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ips/action_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ips/action_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/floating_ips/action_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/forwarding_rule.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/forwarding_rule_param.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/glb_settings.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/glb_settings_param.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/health_check.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/health_check_param.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/image_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/images/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/images/action_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/images/action_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/lb_firewall.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/lb_firewall_param.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancer_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancers/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancers/droplet_add_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancers/droplet_remove_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/size_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/size_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/snapshot_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/snapshot_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/snapshot_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/sticky_sessions.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/sticky_sessions_param.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_delete_by_name_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volume_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_initiate_by_id_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_initiate_by_id_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_initiate_by_name_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_initiate_by_name_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_retrieve_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/action_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/snapshot_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/snapshot_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/snapshot_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/snapshot_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/snapshot_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/gpu_droplets/volumes/volume_action.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_delete_response.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_update_regenerate_response.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_key_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/inference/api_model_api_key_info.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_delete_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_base_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_file_upload_data_source.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_indexed_data_source.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_indexing_job.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_spaces_data_source.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/aws_data_source_param.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/data_source_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/data_source_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/data_source_delete_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/data_source_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/data_source_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%)
 rename src/{do_gradientai => gradient}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%)
 rename src/{do_gradientai => gradient}/types/model_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/model_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_delete_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_list_agents_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_list_agents_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/anthropic_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_create_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_create_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_delete_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_retrieve_agents_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_retrieve_agents_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_retrieve_response.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_update_params.py (100%)
 rename src/{do_gradientai => gradient}/types/models/providers/openai_update_response.py (100%)
 rename src/{do_gradientai => gradient}/types/region_list_params.py (100%)
 rename src/{do_gradientai => gradient}/types/region_list_response.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/action.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/action_link.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/api_links.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/api_meta.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/backward_links.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/chat_completion_chunk.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/chat_completion_token_logprob.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/completion_usage.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/disk_info.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/droplet.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/droplet_next_backup_window.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/firewall_rule_target.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/forward_links.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/garbage_collection.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/gpu_info.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/image.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/kernel.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/meta_properties.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/network_v4.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/network_v6.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/page_links.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/region.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/size.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/snapshots.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/subscription.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/subscription_tier_base.py (100%)
 rename src/{do_gradientai => gradient}/types/shared/vpc_peering.py (100%)
 rename src/{do_gradientai => gradient}/types/shared_params/__init__.py (100%)
 rename src/{do_gradientai => gradient}/types/shared_params/firewall_rule_target.py (100%)
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 3dcd6c42..79ee5b7d 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -1,6 +1,6 @@
 # This workflow is triggered when a GitHub release is created.
 # It can also be run manually to re-publish to PyPI in case it failed for some reason.
-# You can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml
+# You can run this workflow by navigating to https://www.github.com/digitalocean/gradient-python/actions/workflows/publish-pypi.yml
 name: Publish PyPI
 on:
   workflow_dispatch:
@@ -28,4 +28,4 @@ jobs:
         run: |
           bash ./bin/publish-pypi
         env:
-          PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
+          PYPI_TOKEN: ${{ secrets.GRADIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index d49e26c2..9c8912bc 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -9,7 +9,7 @@ jobs:
   release_doctor:
     name: release doctor
     runs-on: ubuntu-latest
-    if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+    if: github.repository == 'digitalocean/gradient-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
 
     steps:
       - uses: actions/checkout@v4
@@ -18,4 +18,4 @@ jobs:
         run: |
           bash ./bin/check-release-environment
         env:
-          PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
+          PYPI_TOKEN: ${{ secrets.GRADIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.stats.yml b/.stats.yml
index 3b125b04..8250cefa 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 170
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
 openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: 2eaf277c8b9bf0acf76b2b16f99ff443
+config_hash: b7f3d0224b636e5f618a254fa3a6499a
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4f59c83a..212c4e40 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock
 Most of the SDK is generated code. Modifications to code will be persisted between generations, but
 may result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/gradient/lib/` and `examples/` directories.
 
 ## Adding and running examples
 
@@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g
 
 To install via git:
 
 ```sh
-$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git
+$ pip install git+ssh://git@github.com/digitalocean/gradient-python.git
 ```
 
 Alternatively, you can build from source and install the wheel file:
@@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
 
 ### Publish with a GitHub workflow
 
-You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradient-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
 
 ### Publish manually
diff --git a/LICENSE b/LICENSE
index 974cb08a..656d8887 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright 2025 Gradient AI
+   Copyright 2025 Gradient
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
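For readers skimming this patch, the substance of the rename is easiest to see side by side. A minimal before/after sketch, using only names that appear in the hunks in this patch (the `do_gradientai` package becomes `gradient`, and the `GradientAI`/`AsyncGradientAI` clients become `Gradient`/`AsyncGradient`):

```python
# Before this patch (the "-" lines in the README diff below):
#   from do_gradientai import GradientAI, AsyncGradientAI
#   client = GradientAI()

# After this patch (the "+" lines below):
from gradient import Gradient, AsyncGradient

client = Gradient()  # still reads GRADIENTAI_API_KEY from the environment by default
```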
diff --git a/README.md b/README.md
index 7d41d9c3..f141a2ea 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-# Gradient AI Python API library
+# Gradient Python API library
 
 [![PyPI version](https://img.shields.io/pypi/v/gradient.svg?label=pypi%20(stable))](https://pypi.org/project/gradient/)
 
-The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+
+The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.8+
 application. The library includes type definitions for all request params and response fields,
 and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
 
@@ -26,9 +26,9 @@ The full API of this library can be found in [api.md](api.md).
 
 ```python
 import os
-from do_gradientai import GradientAI
+from gradient import Gradient
 
-client = GradientAI(
+client = Gradient(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
 )
 
@@ -51,14 +51,14 @@ so that your API Key is not stored in source control.
 
 ## Async usage
 
-Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with each API call:
+Simply import `AsyncGradient` instead of `Gradient` and use `await` with each API call:
 
 ```python
 import os
 import asyncio
-from do_gradientai import AsyncGradientAI
+from gradient import AsyncGradient
 
-client = AsyncGradientAI(
+client = AsyncGradient(
     api_key=os.environ.get("GRADIENTAI_API_KEY"),  # This is the default and can be omitted
 )
 
@@ -96,12 +96,12 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH
 
 ```python
 import asyncio
-from do_gradientai import DefaultAioHttpClient
-from do_gradientai import AsyncGradientAI
+from gradient import DefaultAioHttpClient
+from gradient import AsyncGradient
 
 
 async def main() -> None:
-    async with AsyncGradientAI(
+    async with AsyncGradient(
         api_key="My API Key",
         http_client=DefaultAioHttpClient(),
     ) as client:
@@ -125,9 +125,9 @@ asyncio.run(main())
 
 We provide support for streaming responses using Server Side Events (SSE).
 
 ```python
-from do_gradientai import GradientAI
+from gradient import Gradient
 
-client = GradientAI()
+client = Gradient()
 
 stream = client.chat.completions.create(
     messages=[
@@ -146,9 +146,9 @@ for completion in stream:
 
 The async client uses the exact same interface.
 
 ```python
-from do_gradientai import AsyncGradientAI
+from gradient import AsyncGradient
 
-client = AsyncGradientAI()
+client = AsyncGradient()
 
 stream = await client.chat.completions.create(
     messages=[
@@ -178,9 +178,9 @@ Typed requests and responses provide autocomplete and documentation within your
 
 Nested parameters are dictionaries, typed using `TypedDict`, for example:
 
 ```python
-from do_gradientai import GradientAI
+from gradient import Gradient
 
-client = GradientAI()
+client = Gradient()
 
 completion = client.chat.completions.create(
     messages=[
@@ -197,18 +197,18 @@ print(completion.stream_options)
 
 ## Handling errors
 
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradient.APIConnectionError` is raised.
 
 When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `gradient.APIStatusError` is raised, containing `status_code` and `response` properties.
 
-All errors inherit from `do_gradientai.APIError`.
+All errors inherit from `gradient.APIError`.
 
 ```python
-import do_gradientai
-from do_gradientai import GradientAI
+import gradient
+from gradient import Gradient
 
-client = GradientAI()
+client = Gradient()
 
 try:
     client.chat.completions.create(
         messages=[
@@ -220,12 +220,12 @@ try:
         ],
         model="llama3.3-70b-instruct",
     )
-except do_gradientai.APIConnectionError as e:
+except gradient.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
-except do_gradientai.RateLimitError as e:
+except gradient.RateLimitError as e:
     print("A 429 status code was received; we should back off a bit.")
-except do_gradientai.APIStatusError as e:
+except gradient.APIStatusError as e:
     print("Another non-200-range status code was received")
     print(e.status_code)
     print(e.response)
@@ -253,10 +253,10 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
 
 You can use the `max_retries` option to configure or disable retry settings:
 
 ```python
-from do_gradientai import GradientAI
+from gradient import Gradient
 
 # Configure the default for all requests:
-client = GradientAI(
+client = Gradient(
     # default is 2
     max_retries=0,
 )
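The hunk above shows only the client-wide default. As a sketch of the per-request form, assuming the `with_options` helper that appears later in this README diff (for `http_client`) also accepts `max_retries` the way the constructor does:

```python
from gradient import Gradient

client = Gradient()

# Hypothetical per-request override: retry this one call up to 5 times,
# leaving the client-wide default untouched. The model name is taken from
# the diff above; the message content is a placeholder.
completion = client.with_options(max_retries=5).chat.completions.create(
    messages=[{"role": "user", "content": "Hello!"}],
    model="llama3.3-70b-instruct",
)
```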
@@ -279,16 +279,16 @@ By default requests time out after 1 minute. You can configure this with a `time
 which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
 
 ```python
-from do_gradientai import GradientAI
+from gradient import Gradient
 
 # Configure the default for all requests:
-client = GradientAI(
+client = Gradient(
     # 20 seconds (default is 1 minute)
     timeout=20.0,
 )
 
 # More granular control:
-client = GradientAI(
+client = Gradient(
     timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
 )
 
@@ -314,10 +314,10 @@ Note that requests that time out are [retried twice by default](#retries).
 
 We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
 
-You can enable logging by setting the environment variable `GRADIENT_AI_LOG` to `info`.
+You can enable logging by setting the environment variable `GRADIENT_LOG` to `info`.
 
 ```shell
-$ export GRADIENT_AI_LOG=info
+$ export GRADIENT_LOG=info
 ```
 
 Or to `debug` for more verbose logging.
 
@@ -339,9 +339,9 @@ if response.my_field is None:
 
 The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
 
 ```py
-from do_gradientai import GradientAI
+from gradient import Gradient
 
-client = GradientAI()
+client = Gradient()
 
 response = client.chat.completions.with_raw_response.create(
     messages=[{
         "role": "user",
@@ -355,9 +355,9 @@ completion = response.parse()  # get the object that `chat.completions.create()`
 print(completion.choices)
 ```
 
-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradient-python/tree/main/src/gradient/_response.py) object.
 
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradient-python/tree/main/src/gradient/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
 
 #### `.with_streaming_response`
 
@@ -427,10 +427,10 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
 
 ```python
 import httpx
-from do_gradientai import GradientAI, DefaultHttpxClient
+from gradient import Gradient, DefaultHttpxClient
 
-client = GradientAI(
-    # Or use the `GRADIENT_AI_BASE_URL` env var
+client = Gradient(
+    # Or use the `GRADIENT_BASE_URL` env var
     base_url="http://my.test.server.example.com:8083",
     http_client=DefaultHttpxClient(
         proxy="http://my.test.proxy.example.com",
@@ -450,9 +450,9 @@ client.with_options(http_client=DefaultHttpxClient(...))
 
 By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
 
 ```py
-from do_gradientai import GradientAI
+from gradient import Gradient
 
-with GradientAI() as client:
+with Gradient() as client:
     # make requests here
     ...
 
@@ -469,7 +469,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con
 
 We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
 
-We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions.
+We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradient-python/issues) with questions, bugs, or suggestions.
 
 ### Determining the installed version
 
@@ -478,8 +478,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
 
 You can determine the version that is being used at runtime with:
 
 ```py
-import do_gradientai
-print(do_gradientai.__version__)
+import gradient
+print(gradient.__version__)
 ```
 
 ## Requirements
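Taken together, the hunks above also rename every environment variable and CI secret that carried the old `GRADIENT_AI_` prefix. A quick-reference sketch (Python for consistency with the rest of this patch; these are ordinary environment variables):

```python
import os

# Renames visible in the hunks above (old -> new):
#   GRADIENT_AI_LOG        -> GRADIENT_LOG         (SDK log level: "info" or "debug")
#   GRADIENT_AI_BASE_URL   -> GRADIENT_BASE_URL    (base URL override)
#   GRADIENT_AI_PYPI_TOKEN -> GRADIENT_PYPI_TOKEN  (publish-workflow secret)
# Unchanged in this patch: GRADIENTAI_API_KEY (the API key variable).
os.environ["GRADIENT_LOG"] = "info"  # was GRADIENT_AI_LOG before this patch
```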
diff --git a/SECURITY.md b/SECURITY.md
index a7593759..fe1c055c 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -16,7 +16,7 @@ before making any information public.
 ## Reporting Non-SDK Related Security Issues
 
 If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Gradient AI, please follow the respective company's security reporting guidelines.
+or products provided by Gradient, please follow the respective company's security reporting guidelines.
 
 ---
diff --git a/api.md b/api.md
index dc52233d..1091e4dc 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
 # Shared Types
 
 ```python
-from do_gradientai.types import (
+from gradient.types import (
     Action,
     ActionLink,
     APILinks,
@@ -37,7 +37,7 @@ from do_gradientai.types import (
 
 Types:
 
 ```python
-from do_gradientai.types import (
+from gradient.types import (
     APIAgent,
     APIAgentAPIKeyInfo,
     APIAgentModel,
@@ -57,19 +57,19 @@ from do_gradientai.types import (
 
 Methods:
 
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
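The listing gives signatures only; as a minimal usage sketch under the new package name (the UUID is a placeholder, and any response fields live in the `gradient.types` imports above):

```python
from gradient import Gradient

client = Gradient()

# Method names are taken from the listing above; "agent-uuid" is a placeholder.
agents = client.agents.list()
agent = client.agents.retrieve("agent-uuid")
```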
 
 ## APIKeys
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
     APIKeyListResponse,
@@ -80,11 +80,11 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
 
 ## Chat
 
@@ -93,35 +93,32 @@ Types:
 
 ```python
-from do_gradientai.types.agents.chat import CompletionCreateResponse
+from gradient.types.agents.chat import CompletionCreateResponse
 ```
 
 Methods:
 
-- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
 
 ## EvaluationMetrics
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
-    EvaluationMetricListResponse,
-    EvaluationMetricListRegionsResponse,
-)
+from gradient.types.agents import EvaluationMetricListResponse, EvaluationMetricListRegionsResponse
 ```
 
 Methods:
 
-- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
-- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse
 
 ### Workspaces
 
 Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics import (
+from gradient.types.agents.evaluation_metrics import (
     WorkspaceCreateResponse,
     WorkspaceRetrieveResponse,
     WorkspaceUpdateResponse,
@@ -133,40 +130,37 @@ from do_gradientai.types.agents.evaluation_metrics import (
 
 Methods:
 
-- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
-- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
-- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
-- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
-- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
-- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
 
 #### Agents
 
 Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics.workspaces import (
-    AgentListResponse,
-    AgentMoveResponse,
-)
+from gradient.types.agents.evaluation_metrics.workspaces import AgentListResponse, AgentMoveResponse
 ```
 
 Methods:
 
-- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
-- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
 
 ### Models
 
 Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics import ModelListResponse
+from gradient.types.agents.evaluation_metrics import ModelListResponse
 ```
 
 Methods:
 
-- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
+- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
 
 ### Anthropic
 
@@ -175,7 +169,7 @@ Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics.anthropic import (
+from gradient.types.agents.evaluation_metrics.anthropic import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -187,12 +181,12 @@ from do_gradientai.types.agents.evaluation_metrics.anthropic import (
 
 Methods:
 
-- client.agents.evaluation_metrics.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.agents.evaluation_metrics.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.agents.evaluation_metrics.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.agents.evaluation_metrics.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.agents.evaluation_metrics.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.agents.evaluation_metrics.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.agents.evaluation_metrics.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
 
 ### OpenAI
 
@@ -201,7 +195,7 @@ Types:
 
 ```python
-from do_gradientai.types.agents.evaluation_metrics.openai import (
+from gradient.types.agents.evaluation_metrics.openai import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -213,19 +207,19 @@ from do_gradientai.types.agents.evaluation_metrics.openai import (
 
 Methods:
 
-- client.agents.evaluation_metrics.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.agents.evaluation_metrics.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.agents.evaluation_metrics.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.agents.evaluation_metrics.openai.keys.list(\*\*params) -> KeyListResponse
-- client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.agents.evaluation_metrics.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.openai.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
 
 ## EvaluationRuns
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     APIEvaluationMetric,
     APIEvaluationMetricResult,
     APIEvaluationPrompt,
@@ -239,17 +233,17 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
-- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
-- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
 
 ## EvaluationTestCases
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     APIEvaluationTestCase,
     APIStarMetric,
     EvaluationTestCaseCreateResponse,
@@ -262,18 +256,18 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
-- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
 
 ## EvaluationDatasets
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     EvaluationDatasetCreateResponse,
     EvaluationDatasetCreateFileUploadPresignedURLsResponse,
 )
@@ -281,15 +275,15 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
 
 ## Functions
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     FunctionCreateResponse,
     FunctionUpdateResponse,
     FunctionDeleteResponse,
@@ -298,43 +292,43 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
 
 ## Versions
 
 Types:
 
 ```python
-from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse
+from gradient.types.agents import VersionUpdateResponse, VersionListResponse
 ```
 
 Methods:
 
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
 
 ## KnowledgeBases
 
 Types:
 
 ```python
-from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradient.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
 ```
 
 Methods:
 
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
 
 ## Routes
 
 Types:
 
 ```python
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     RouteUpdateResponse,
     RouteDeleteResponse,
     RouteAddResponse,
@@ -344,10 +338,10 @@ from do_gradientai.types.agents import (
 
 Methods:
 
-- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
-- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
-- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
-- client.agents.routes.view(uuid) -> RouteViewResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse
 
 # Chat
 
@@ -356,19 +350,19 @@ Types:
 
 ```python
-from do_gradientai.types.chat import CompletionCreateResponse
+from gradient.types.chat import CompletionCreateResponse
 ```
 
 Methods:
 
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
 
 # GPUDroplets
 
 Types:
 
 ```python
-from do_gradientai.types import (
+from gradient.types import (
     DropletBackupPolicy,
     GPUDropletCreateResponse,
     GPUDropletRetrieveResponse,
@@ -382,22 +376,22 @@ from do_gradientai.types import (
 
 Methods:
 
-- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
-- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
-- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
-- client.gpu_droplets.delete(droplet_id) -> None
-- client.gpu_droplets.delete_by_tag(\*\*params) -> None
-- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
-- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
-- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
-- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
+- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse
+- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse
+- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse
+- client.gpu_droplets.delete(droplet_id) -> None
+- client.gpu_droplets.delete_by_tag(\*\*params) -> None
+- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse
+- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse
+- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse
+- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse
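Likewise for the GPU droplet surface; a minimal sketch against the methods listed above (the droplet ID is a placeholder):

```python
from gradient import Gradient

client = Gradient()

# list() and retrieve(droplet_id) are taken from the listing above;
# 12345 is a placeholder droplet ID.
droplets = client.gpu_droplets.list()
droplet = client.gpu_droplets.retrieve(12345)
```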
 
 ## Backups
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     BackupListResponse,
     BackupListPoliciesResponse,
     BackupListSupportedPoliciesResponse,
@@ -407,17 +401,17 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
-- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
-- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
-- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
+- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse
+- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse
+- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse
+- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse
 
 ## Actions
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     ActionRetrieveResponse,
     ActionListResponse,
     ActionBulkInitiateResponse,
@@ -427,17 +421,17 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
-- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
-- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
-- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
+- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse
+- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse
+- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse
 
 ## DestroyWithAssociatedResources
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     AssociatedResource,
     DestroyedAssociatedResource,
     DestroyWithAssociatedResourceListResponse,
@@ -447,18 +441,18 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
-- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
-- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
-- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
-- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse
+- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse
+- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None
+- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None
+- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None
 
 ## Autoscale
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     AutoscalePool,
     AutoscalePoolDropletTemplate,
     AutoscalePoolDynamicConfig,
@@ -475,21 +469,21 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
-- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
-- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
-- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
-- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
-- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
-- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
+- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse
+- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse
+- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse
+- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse
+- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None
+- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse
+- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse
 
 ## Firewalls
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     Firewall,
     FirewallCreateResponse,
     FirewallRetrieveResponse,
@@ -500,39 +494,39 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
-- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
-- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
-- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
-- client.gpu_droplets.firewalls.delete(firewall_id) -> None
+- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse
+- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse
+- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse
+- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse
+- client.gpu_droplets.firewalls.delete(firewall_id) -> None
 
 ### Droplets
 
 Methods:
 
-- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None
 
 ### Tags
 
 Methods:
 
-- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None
 
 ### Rules
 
 Methods:
 
-- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
-- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None
+- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None
 
 ## FloatingIPs
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     FloatingIP,
     FloatingIPCreateResponse,
     FloatingIPRetrieveResponse,
@@ -542,17 +536,17 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
-- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
-- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
-- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
+- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse
+- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse
+- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse
+- client.gpu_droplets.floating_ips.delete(floating_ip) -> None
 
 ### Actions
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets.floating_ips import (
+from gradient.types.gpu_droplets.floating_ips import (
     ActionCreateResponse,
     ActionRetrieveResponse,
     ActionListResponse,
 )
@@ -561,16 +555,16 @@ from do_gradientai.types.gpu_droplets.floating_ips import (
 
 Methods:
 
-- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
-- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
-- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
+- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse
+- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse
+- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse
 
 ## Images
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     ImageCreateResponse,
     ImageRetrieveResponse,
     ImageUpdateResponse,
@@ -580,32 +574,32 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
-- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
-- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
-- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
-- client.gpu_droplets.images.delete(image_id) -> None
+- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse
+- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse
+- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse
+- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse
+- client.gpu_droplets.images.delete(image_id) -> None
 
 ### Actions
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets.images import ActionListResponse
+from gradient.types.gpu_droplets.images import ActionListResponse
 ```
 
 Methods:
 
-- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
-- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
-- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
+- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action
+- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action
+- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse
 
 ## LoadBalancers
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     Domains,
     ForwardingRule,
     GlbSettings,
@@ -622,59 +616,59 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
-- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
-- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
-- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
-- client.gpu_droplets.load_balancers.delete(lb_id) -> None
-- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
+- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse
+- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse
+- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse
+- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse
+- client.gpu_droplets.load_balancers.delete(lb_id) -> None
+- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None
 
 ### Droplets
 
 Methods:
 
-- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None
 
 ### ForwardingRules
 
 Methods:
 
-- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
-- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None
+- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None
 
 ## Sizes
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import SizeListResponse
+from gradient.types.gpu_droplets import SizeListResponse
 ```
 
 Methods:
 
-- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse
+- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse
 
 ## Snapshots
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse
+from gradient.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse
 ```
 
 Methods:
 
-- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
-- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse
-- client.gpu_droplets.snapshots.delete(snapshot_id) -> None
+- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse
+- client.gpu_droplets.snapshots.delete(snapshot_id) -> None
 
 ## Volumes
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     VolumeCreateResponse,
     VolumeRetrieveResponse,
     VolumeListResponse,
@@ -683,18 +677,18 @@ from do_gradientai.types.gpu_droplets import (
 
 Methods:
 
-- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse
-- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse
-- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse
-- client.gpu_droplets.volumes.delete(volume_id) -> None
-- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None
+- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse
+- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse
+- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse
+- client.gpu_droplets.volumes.delete(volume_id) -> None
+- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None
 
 ### Actions
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradient.types.gpu_droplets.volumes import (
     VolumeAction,
     ActionRetrieveResponse,
     ActionListResponse,
@@ -705,17 +699,17 @@ from do_gradientai.types.gpu_droplets.volumes import (
 
 Methods:
 
-- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse
-- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse
-- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse
-- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse
+- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse
+- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse
+- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse
+- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse
 
 ### Snapshots
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradient.types.gpu_droplets.volumes import (
     SnapshotCreateResponse,
     SnapshotRetrieveResponse,
     SnapshotListResponse,
@@ -724,10 +718,10 @@ from do_gradientai.types.gpu_droplets.volumes import (
 
 Methods:
 
-- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse
-- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
-- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse
-- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None
+- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse
+- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse
+- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse
+- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None
 
 ## Account
 
@@ -736,7 +730,7 @@ Methods:
 
 Types:
 
 ```python
-from do_gradientai.types.gpu_droplets.account import (
+from gradient.types.gpu_droplets.account import (
     KeyCreateResponse,
     KeyRetrieveResponse,
     KeyUpdateResponse,
@@ -746,11 +740,11 @@ from do_gradientai.types.gpu_droplets.account import (
 
 Methods:
 
-- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse
-- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse
-- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse
-- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse
-- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None
+- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse
+- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse
+- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse
+- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse
+- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None
 
 # Inference
 
@@ -759,7 +753,7 @@ Methods:
 
 Types:
 
 ```python
-from do_gradientai.types.inference import (
+from gradient.types.inference import (
     APIModelAPIKeyInfo,
     APIKeyCreateResponse,
     APIKeyUpdateResponse,
@@ -771,18 +765,18 @@ from do_gradientai.types.inference import (
 
 Methods:
 
-- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
-- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
-- client.inference.api_keys.delete(api_key_uuid) ->
APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # KnowledgeBases Types: ```python -from do_gradientai.types import ( +from gradient.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -794,18 +788,18 @@ from do_gradientai.types import ( Methods: -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradient.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -819,16 +813,16 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse ## IndexingJobs Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradient.types.knowledge_bases import ( APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, @@ -841,23 +835,23 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- 
client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Models Types: ```python -from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse +from gradient.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse ## Providers @@ -866,7 +860,7 @@ Methods: Types: ```python -from do_gradientai.types.models.providers import ( +from gradient.types.models.providers import ( AnthropicCreateResponse, AnthropicRetrieveResponse, AnthropicUpdateResponse, @@ -878,19 +872,19 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from do_gradientai.types.models.providers import ( +from gradient.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -902,24 +896,24 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # Regions Types: ```python -from do_gradientai.types import RegionListResponse +from gradient.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # Databases @@ -930,7 +924,7 @@ Methods: Types: ```python 
-from do_gradientai.types.databases.schema_registry import ( +from gradient.types.databases.schema_registry import ( ConfigRetrieveResponse, ConfigUpdateResponse, ConfigRetrieveSubjectResponse, @@ -940,7 +934,7 @@ from do_gradientai.types.databases.schema_registry import ( Methods: -- client.databases.schema_registry.config.retrieve(database_cluster_uuid) -> ConfigRetrieveResponse -- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse -- client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse -- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse +- client.databases.schema_registry.config.retrieve(database_cluster_uuid) -> ConfigRetrieveResponse +- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse +- client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse +- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse diff --git a/mypy.ini b/mypy.ini index 82b0c891..9a8e555e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/gradient/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index fb34755a..3aaa2fdc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [project] name = "gradient" version = "0.1.0-beta.3" -description = "The official Python library for the GradientAI API" +description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" authors = [ -{ name = "Gradient AI", email = "" }, +{ name = "Gradient", email = "" }, ] dependencies = [ "httpx>=0.23.0, <1", @@ -35,8 +35,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/gradientai-python" -Repository = "https://github.com/digitalocean/gradientai-python" +Homepage = "https://github.com/digitalocean/gradient-python" +Repository = "https://github.com/digitalocean/gradient-python" [project.optional-dependencies] aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import do_gradientai'" +"check:importable" = "python -c 'import gradient'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes gradient --ignoreexternal" "typecheck:mypy" = "mypy ." 
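The hunks in this patch rename only the import namespace (`do_gradientai` → `gradient`), the client classes (`GradientAI`/`AsyncGradientAI` → `Gradient`/`AsyncGradient`), and the base exception (`GradientAIError` → `GradientError`); the resource paths and method signatures listed in `api.md` are unchanged. A minimal before/after sketch of the renamed surface, assuming `GRADIENTAI_API_KEY` is exported (the `_client.py` docstring below still infers the key from that variable) and that `regions.list()` accepts its params as optional keywords:

```python
# Sketch only -- not part of the generated SDK in this patch.
# Assumes GRADIENTAI_API_KEY is set; per the _client.py hunks below the key
# is inferred from it, and GRADIENT_BASE_URL (renamed from
# GRADIENT_AI_BASE_URL) can override the default api.digitalocean.com base URL.
from gradient import Gradient, GradientError  # was: from do_gradientai import GradientAI, GradientAIError

client = Gradient()

try:
    regions = client.regions.list()  # -> RegionListResponse, per api.md above
    print(regions)
except GradientError as err:  # base exception renamed from GradientAIError
    print(f"Gradient API request failed: {err}")
```

The async client follows the same pattern: `AsyncGradient` replaces `AsyncGradientAI`, with identical resource attributes and `with_raw_response`/`with_streaming_response` variants.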
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/do_gradientai"] +packages = ["src/gradient"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -125,7 +125,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/gradient-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["do_gradientai", "tests"] +known-first-party = ["gradient", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index a320c1a8..0b0d1705 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/do_gradientai/_version.py" + "src/gradient/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index e46e909b..9ccb6ae5 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import do_gradientai' +rye run python -c 'import gradient' diff --git a/src/do_gradientai/__init__.py b/src/gradient/__init__.py similarity index 90% rename from src/do_gradientai/__init__.py rename to src/gradient/__init__.py index 41b943b2..c78eff30 100644 --- a/src/do_gradientai/__init__.py +++ b/src/gradient/__init__.py @@ -9,12 +9,12 @@ Client, Stream, Timeout, + Gradient, Transport, - GradientAI, AsyncClient, AsyncStream, + AsyncGradient, RequestOptions, - AsyncGradientAI, ) from ._models import BaseModel from ._version import __title__, __version__ @@ -23,12 +23,12 @@ from ._exceptions import ( APIError, ConflictError, + GradientError, NotFoundError, APIStatusError, RateLimitError, APITimeoutError, BadRequestError, - GradientAIError, APIConnectionError, AuthenticationError, InternalServerError, @@ -49,7 +49,7 @@ "NotGiven", "NOT_GIVEN", "Omit", - "GradientAIError", + "GradientError", "APIError", "APIStatusError", "APITimeoutError", @@ -69,8 +69,8 @@ "AsyncClient", "Stream", "AsyncStream", - "GradientAI", - "AsyncGradientAI", + "Gradient", + "AsyncGradient", "file_from_path", "BaseModel", "DEFAULT_TIMEOUT", @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError +# gradient._exceptions.NotFoundError -> gradient.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "do_gradientai" + __locals[__name].__module__ = "gradient" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. 
pass diff --git a/src/do_gradientai/_base_client.py b/src/gradient/_base_client.py similarity index 99% rename from src/do_gradientai/_base_client.py rename to src/gradient/_base_client.py index 326c662c..5ea17c81 100644 --- a/src/do_gradientai/_base_client.py +++ b/src/gradient/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradient.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/do_gradientai/_client.py b/src/gradient/_client.py similarity index 95% rename from src/do_gradientai/_client.py rename to src/gradient/_client.py index 24f65772..03f8ddcd 100644 --- a/src/do_gradientai/_client.py +++ b/src/gradient/_client.py @@ -47,14 +47,14 @@ "Transport", "ProxiesTypes", "RequestOptions", - "GradientAI", - "AsyncGradientAI", + "Gradient", + "AsyncGradient", "Client", "AsyncClient", ] -class GradientAI(SyncAPIClient): +class Gradient(SyncAPIClient): # client options api_key: str | None inference_key: str | None @@ -87,7 +87,7 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new synchronous GradientAI client instance. + """Construct a new synchronous Gradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `GRADIENTAI_API_KEY` @@ -109,7 +109,7 @@ def __init__( self.agent_domain = agent_domain if base_url is None: - base_url = os.environ.get("GRADIENT_AI_BASE_URL") + base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -176,12 +176,12 @@ def databases(self) -> DatabasesResource: return DatabasesResource(self) @cached_property - def with_raw_response(self) -> GradientAIWithRawResponse: - return GradientAIWithRawResponse(self) + def with_raw_response(self) -> GradientWithRawResponse: + return GradientWithRawResponse(self) @cached_property - def with_streaming_response(self) -> GradientAIWithStreamedResponse: - return GradientAIWithStreamedResponse(self) + def with_streaming_response(self) -> GradientWithStreamedResponse: + return GradientWithStreamedResponse(self) @property @override @@ -309,7 +309,7 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class AsyncGradientAI(AsyncAPIClient): +class AsyncGradient(AsyncAPIClient): # client options api_key: str | None inference_key: str | None @@ -342,7 +342,7 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new async AsyncGradientAI client instance. + """Construct a new async AsyncGradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `GRADIENTAI_API_KEY` @@ -364,7 +364,7 @@ def __init__( self.agent_domain = agent_domain if base_url is None: - base_url = os.environ.get("GRADIENT_AI_BASE_URL") + base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -431,12 +431,12 @@ def databases(self) -> AsyncDatabasesResource: return AsyncDatabasesResource(self) @cached_property - def with_raw_response(self) -> AsyncGradientAIWithRawResponse: - return AsyncGradientAIWithRawResponse(self) + def with_raw_response(self) -> AsyncGradientWithRawResponse: + return AsyncGradientWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncGradientAIWithStreamedResponse: - return AsyncGradientAIWithStreamedResponse(self) + def with_streaming_response(self) -> AsyncGradientWithStreamedResponse: + return AsyncGradientWithStreamedResponse(self) @property @override @@ -564,10 +564,10 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class GradientAIWithRawResponse: - _client: GradientAI +class GradientWithRawResponse: + _client: Gradient - def __init__(self, client: GradientAI) -> None: + def __init__(self, client: Gradient) -> None: self._client = client @cached_property @@ -619,10 +619,10 @@ def databases(self) -> databases.DatabasesResourceWithRawResponse: return DatabasesResourceWithRawResponse(self._client.databases) -class AsyncGradientAIWithRawResponse: - _client: AsyncGradientAI +class AsyncGradientWithRawResponse: + _client: AsyncGradient - def __init__(self, client: AsyncGradientAI) -> None: + def __init__(self, client: AsyncGradient) -> None: self._client = client @cached_property @@ -674,10 +674,10 @@ def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse: return AsyncDatabasesResourceWithRawResponse(self._client.databases) -class GradientAIWithStreamedResponse: - _client: GradientAI +class GradientWithStreamedResponse: + _client: Gradient - def __init__(self, client: GradientAI) -> None: + def __init__(self, client: Gradient) -> None: self._client = client @cached_property @@ -729,10 +729,10 @@ def databases(self) -> databases.DatabasesResourceWithStreamingResponse: return DatabasesResourceWithStreamingResponse(self._client.databases) -class AsyncGradientAIWithStreamedResponse: - _client: AsyncGradientAI +class AsyncGradientWithStreamedResponse: + _client: AsyncGradient - def __init__(self, client: AsyncGradientAI) -> None: + def __init__(self, client: AsyncGradient) -> None: self._client = client @cached_property @@ -784,6 +784,6 @@ def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse: return AsyncDatabasesResourceWithStreamingResponse(self._client.databases) -Client = GradientAI +Client = Gradient -AsyncClient = AsyncGradientAI +AsyncClient = AsyncGradient diff --git a/src/do_gradientai/_compat.py b/src/gradient/_compat.py similarity index 100% rename from src/do_gradientai/_compat.py rename to src/gradient/_compat.py diff --git a/src/do_gradientai/_constants.py b/src/gradient/_constants.py similarity index 100% rename from src/do_gradientai/_constants.py rename to src/gradient/_constants.py diff --git a/src/do_gradientai/_exceptions.py b/src/gradient/_exceptions.py similarity index 97% rename from src/do_gradientai/_exceptions.py rename to src/gradient/_exceptions.py index 
759c8d86..5db08573 100644 --- a/src/do_gradientai/_exceptions.py +++ b/src/gradient/_exceptions.py @@ -18,11 +18,11 @@ ] -class GradientAIError(Exception): +class GradientError(Exception): pass -class APIError(GradientAIError): +class APIError(GradientError): message: str request: httpx.Request diff --git a/src/do_gradientai/_files.py b/src/gradient/_files.py similarity index 100% rename from src/do_gradientai/_files.py rename to src/gradient/_files.py diff --git a/src/do_gradientai/_models.py b/src/gradient/_models.py similarity index 100% rename from src/do_gradientai/_models.py rename to src/gradient/_models.py diff --git a/src/do_gradientai/_qs.py b/src/gradient/_qs.py similarity index 100% rename from src/do_gradientai/_qs.py rename to src/gradient/_qs.py diff --git a/src/do_gradientai/_resource.py b/src/gradient/_resource.py similarity index 80% rename from src/do_gradientai/_resource.py rename to src/gradient/_resource.py index 9182ee0b..f2bb6c14 100644 --- a/src/do_gradientai/_resource.py +++ b/src/gradient/_resource.py @@ -8,13 +8,13 @@ import anyio if TYPE_CHECKING: - from ._client import GradientAI, AsyncGradientAI + from ._client import Gradient, AsyncGradient class SyncAPIResource: - _client: GradientAI + _client: Gradient - def __init__(self, client: GradientAI) -> None: + def __init__(self, client: Gradient) -> None: self._client = client self._get = client.get self._post = client.post @@ -28,9 +28,9 @@ def _sleep(self, seconds: float) -> None: class AsyncAPIResource: - _client: AsyncGradientAI + _client: AsyncGradient - def __init__(self, client: AsyncGradientAI) -> None: + def __init__(self, client: AsyncGradient) -> None: self._client = client self._get = client.get self._post = client.post diff --git a/src/do_gradientai/_response.py b/src/gradient/_response.py similarity index 98% rename from src/do_gradientai/_response.py rename to src/gradient/_response.py index 8ca43971..4702edaf 100644 --- a/src/do_gradientai/_response.py +++ b/src/gradient/_response.py @@ -29,7 +29,7 @@ from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type -from ._exceptions import GradientAIError, APIResponseValidationError +from ._exceptions import GradientError, APIResponseValidationError if TYPE_CHECKING: from ._models import FinalRequestOptions @@ -217,9 +217,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel) ): - raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" - ) + raise TypeError("Pydantic models must subclass our base model type, e.g. `from gradient import BaseModel`") if ( cast_to is not object @@ -285,7 +283,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradient import BaseModel class MyModel(BaseModel): @@ -387,7 +385,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradient import BaseModel class MyModel(BaseModel): @@ -558,11 +556,11 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `do_gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradient._streaming` for reference", ) -class StreamAlreadyConsumed(GradientAIError): +class StreamAlreadyConsumed(GradientError): """ Attempted to read or stream content, but the content has already been streamed. diff --git a/src/do_gradientai/_streaming.py b/src/gradient/_streaming.py similarity index 99% rename from src/do_gradientai/_streaming.py rename to src/gradient/_streaming.py index 69a805ad..eb9be89d 100644 --- a/src/do_gradientai/_streaming.py +++ b/src/gradient/_streaming.py @@ -13,7 +13,7 @@ from ._exceptions import APIError if TYPE_CHECKING: - from ._client import GradientAI, AsyncGradientAI + from ._client import Gradient, AsyncGradient _T = TypeVar("_T") @@ -31,7 +31,7 @@ def __init__( *, cast_to: type[_T], response: httpx.Response, - client: GradientAI, + client: Gradient, ) -> None: self.response = response self._cast_to = cast_to @@ -112,7 +112,7 @@ def __init__( *, cast_to: type[_T], response: httpx.Response, - client: AsyncGradientAI, + client: AsyncGradient, ) -> None: self.response = response self._cast_to = cast_to diff --git a/src/do_gradientai/_types.py b/src/gradient/_types.py similarity index 99% rename from src/do_gradientai/_types.py rename to src/gradient/_types.py index c356c700..b44bb2d9 100644 --- a/src/do_gradientai/_types.py +++ b/src/gradient/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from do_gradientai import NoneType +# from gradient import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/do_gradientai/_utils/__init__.py b/src/gradient/_utils/__init__.py similarity index 100% rename from src/do_gradientai/_utils/__init__.py rename to src/gradient/_utils/__init__.py diff --git a/src/do_gradientai/_utils/_logs.py b/src/gradient/_utils/_logs.py similarity index 70% rename from src/do_gradientai/_utils/_logs.py rename to src/gradient/_utils/_logs.py index ac45b1a5..a60da7f9 100644 --- a/src/do_gradientai/_utils/_logs.py +++ b/src/gradient/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("do_gradientai") +logger: logging.Logger = logging.getLogger("gradient") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. 
[2023-10-05 14:12:26 - gradient._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", @@ -14,7 +14,7 @@ def _basic_config() -> None: def setup_logging() -> None: - env = os.environ.get("GRADIENT_AI_LOG") + env = os.environ.get("GRADIENT_LOG") if env == "debug": _basic_config() logger.setLevel(logging.DEBUG) diff --git a/src/do_gradientai/_utils/_proxy.py b/src/gradient/_utils/_proxy.py similarity index 100% rename from src/do_gradientai/_utils/_proxy.py rename to src/gradient/_utils/_proxy.py diff --git a/src/do_gradientai/_utils/_reflection.py b/src/gradient/_utils/_reflection.py similarity index 100% rename from src/do_gradientai/_utils/_reflection.py rename to src/gradient/_utils/_reflection.py diff --git a/src/do_gradientai/_utils/_resources_proxy.py b/src/gradient/_utils/_resources_proxy.py similarity index 50% rename from src/do_gradientai/_utils/_resources_proxy.py rename to src/gradient/_utils/_resources_proxy.py index 03763c3b..bf3e570d 100644 --- a/src/do_gradientai/_utils/_resources_proxy.py +++ b/src/gradient/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `do_gradientai.resources` module. + """A proxy for the `gradient.resources` module. - This is used so that we can lazily import `do_gradientai.resources` only when - needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` + This is used so that we can lazily import `gradient.resources` only when + needed *and* so that users can just import `gradient` and reference `gradient.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("do_gradientai.resources") + mod = importlib.import_module("gradient.resources") return mod diff --git a/src/do_gradientai/_utils/_streams.py b/src/gradient/_utils/_streams.py similarity index 100% rename from src/do_gradientai/_utils/_streams.py rename to src/gradient/_utils/_streams.py diff --git a/src/do_gradientai/_utils/_sync.py b/src/gradient/_utils/_sync.py similarity index 100% rename from src/do_gradientai/_utils/_sync.py rename to src/gradient/_utils/_sync.py diff --git a/src/do_gradientai/_utils/_transform.py b/src/gradient/_utils/_transform.py similarity index 100% rename from src/do_gradientai/_utils/_transform.py rename to src/gradient/_utils/_transform.py diff --git a/src/do_gradientai/_utils/_typing.py b/src/gradient/_utils/_typing.py similarity index 100% rename from src/do_gradientai/_utils/_typing.py rename to src/gradient/_utils/_typing.py diff --git a/src/do_gradientai/_utils/_utils.py b/src/gradient/_utils/_utils.py similarity index 100% rename from src/do_gradientai/_utils/_utils.py rename to src/gradient/_utils/_utils.py diff --git a/src/do_gradientai/_version.py b/src/gradient/_version.py similarity index 83% rename from src/do_gradientai/_version.py rename to src/gradient/_version.py index 2789c067..4a62d21d 100644 --- a/src/do_gradientai/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-__title__ = "do_gradientai" +__title__ = "gradient" __version__ = "0.1.0-beta.3" # x-release-please-version diff --git a/src/gradient/lib/.keep b/src/gradient/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/gradient/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/do_gradientai/py.typed b/src/gradient/py.typed similarity index 100% rename from src/do_gradientai/py.typed rename to src/gradient/py.typed diff --git a/src/do_gradientai/resources/__init__.py b/src/gradient/resources/__init__.py similarity index 100% rename from src/do_gradientai/resources/__init__.py rename to src/gradient/resources/__init__.py diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/gradient/resources/agents/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/__init__.py rename to src/gradient/resources/agents/__init__.py diff --git a/src/do_gradientai/resources/agents/agents.py b/src/gradient/resources/agents/agents.py similarity index 99% rename from src/do_gradientai/resources/agents/agents.py rename to src/gradient/resources/agents/agents.py index 92d696ba..cff147c9 100644 --- a/src/do_gradientai/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -164,7 +164,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -173,7 +173,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -605,7 +605,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -614,7 +614,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/api_keys.py b/src/gradient/resources/agents/api_keys.py similarity index 99% rename from src/do_gradientai/resources/agents/api_keys.py rename to src/gradient/resources/agents/api_keys.py index 9f4d9660..7e9feb51 100644 --- a/src/do_gradientai/resources/agents/api_keys.py +++ b/src/gradient/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -288,7 +288,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -297,7 +297,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/chat/__init__.py b/src/gradient/resources/agents/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/__init__.py rename to src/gradient/resources/agents/chat/__init__.py diff --git a/src/do_gradientai/resources/agents/chat/chat.py b/src/gradient/resources/agents/chat/chat.py similarity index 93% rename from src/do_gradientai/resources/agents/chat/chat.py rename to src/gradient/resources/agents/chat/chat.py index c87bd158..80947cfb 100644 --- a/src/do_gradientai/resources/agents/chat/chat.py +++ b/src/gradient/resources/agents/chat/chat.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py similarity index 99% rename from src/do_gradientai/resources/agents/chat/completions.py rename to src/gradient/resources/agents/chat/completions.py index bc6f3084..0ff797bf 100644 --- a/src/do_gradientai/resources/agents/chat/completions.py +++ b/src/gradient/resources/agents/chat/completions.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -506,7 +506,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -515,7 +515,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_datasets.py b/src/gradient/resources/agents/evaluation_datasets.py similarity index 98% rename from src/do_gradientai/resources/agents/evaluation_datasets.py rename to src/gradient/resources/agents/evaluation_datasets.py index 42eca703..d8e960de 100644 --- a/src/do_gradientai/resources/agents/evaluation_datasets.py +++ b/src/gradient/resources/agents/evaluation_datasets.py @@ -37,7 +37,7 @@ def with_raw_response(self) -> EvaluationDatasetsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return EvaluationDatasetsResourceWithRawResponse(self) @@ -46,7 +46,7 @@ def with_streaming_response(self) -> EvaluationDatasetsResourceWithStreamingResp """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return EvaluationDatasetsResourceWithStreamingResponse(self) @@ -144,7 +144,7 @@ def with_raw_response(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncEvaluationDatasetsResourceWithRawResponse(self) @@ -153,7 +153,7 @@ def with_streaming_response(self) -> AsyncEvaluationDatasetsResourceWithStreamin """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncEvaluationDatasetsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/gradient/resources/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py rename to src/gradient/resources/agents/evaluation_metrics/anthropic/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py similarity index 93% rename from src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py rename to src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py index 1532f98e..0079d59b 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py +++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py similarity index 99% rename from src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py rename to src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py index 959e786b..6111bf6f 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py +++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py @@ -38,7 +38,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -47,7 +47,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -330,7 +330,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -339,7 +339,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 98% rename from src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py index 533a68bd..f6453d4d 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -77,7 +77,7 @@ def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return EvaluationMetricsResourceWithRawResponse(self) @@ -86,7 +86,7 @@ def with_streaming_response(self) -> EvaluationMetricsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return EvaluationMetricsResourceWithStreamingResponse(self) @@ -186,7 +186,7 @@ def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncEvaluationMetricsResourceWithRawResponse(self) @@ -195,7 +195,7 @@ def with_streaming_response(self) -> AsyncEvaluationMetricsResourceWithStreaming """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncEvaluationMetricsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/models.py b/src/gradient/resources/agents/evaluation_metrics/models.py similarity index 97% rename from src/do_gradientai/resources/agents/evaluation_metrics/models.py rename to src/gradient/resources/agents/evaluation_metrics/models.py index 20a44a22..1902a4f0 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/models.py +++ b/src/gradient/resources/agents/evaluation_metrics/models.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -128,7 +128,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -137,7 +137,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py b/src/gradient/resources/agents/evaluation_metrics/openai/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py rename to src/gradient/resources/agents/evaluation_metrics/openai/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py similarity index 99% rename from src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py rename to src/gradient/resources/agents/evaluation_metrics/openai/keys.py index 33a71ae1..00131691 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py +++ b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py @@ -38,7 +38,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -47,7 +47,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -328,7 +328,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -337,7 +337,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py b/src/gradient/resources/agents/evaluation_metrics/openai/openai.py similarity index 93% rename from src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py rename to src/gradient/resources/agents/evaluation_metrics/openai/openai.py index d66dbbde..00fd8a7d 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py +++ b/src/gradient/resources/agents/evaluation_metrics/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to src/gradient/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 98% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py index a5e68a45..1a73bc60 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -159,7 +159,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -168,7 +168,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 99% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py index cb213e1d..a2cf5ebc 100644 --- a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -49,7 +49,7 @@ def with_raw_response(self) -> WorkspacesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return WorkspacesResourceWithRawResponse(self) @@ -58,7 +58,7 @@ def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return WorkspacesResourceWithStreamingResponse(self) @@ -311,7 +311,7 @@ def with_raw_response(self) -> AsyncWorkspacesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncWorkspacesResourceWithRawResponse(self) @@ -320,7 +320,7 @@ def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncWorkspacesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/gradient/resources/agents/evaluation_runs.py similarity index 98% rename from src/do_gradientai/resources/agents/evaluation_runs.py rename to src/gradient/resources/agents/evaluation_runs.py index c5ea2520..e55cc275 100644 --- a/src/do_gradientai/resources/agents/evaluation_runs.py +++ b/src/gradient/resources/agents/evaluation_runs.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return EvaluationRunsResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return EvaluationRunsResourceWithStreamingResponse(self) @@ -235,7 +235,7 @@ def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncEvaluationRunsResourceWithRawResponse(self) @@ -244,7 +244,7 @@ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncEvaluationRunsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/gradient/resources/agents/evaluation_test_cases.py similarity index 99% rename from src/do_gradientai/resources/agents/evaluation_test_cases.py rename to src/gradient/resources/agents/evaluation_test_cases.py index e33f9f91..454576c8 100644 --- a/src/do_gradientai/resources/agents/evaluation_test_cases.py +++ b/src/gradient/resources/agents/evaluation_test_cases.py @@ -42,7 +42,7 @@ def with_raw_response(self) -> EvaluationTestCasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return EvaluationTestCasesResourceWithRawResponse(self) @@ -51,7 +51,7 @@ def with_streaming_response(self) -> EvaluationTestCasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return EvaluationTestCasesResourceWithStreamingResponse(self) @@ -304,7 +304,7 @@ def with_raw_response(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncEvaluationTestCasesResourceWithRawResponse(self) @@ -313,7 +313,7 @@ def with_streaming_response(self) -> AsyncEvaluationTestCasesResourceWithStreami """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncEvaluationTestCasesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/functions.py b/src/gradient/resources/agents/functions.py similarity index 98% rename from src/do_gradientai/resources/agents/functions.py rename to src/gradient/resources/agents/functions.py index 1c5b2015..7986f750 100644 --- a/src/do_gradientai/resources/agents/functions.py +++ b/src/gradient/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -235,7 +235,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -244,7 +244,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/knowledge_bases.py b/src/gradient/resources/agents/knowledge_bases.py similarity index 98% rename from src/do_gradientai/resources/agents/knowledge_bases.py rename to src/gradient/resources/agents/knowledge_bases.py index a5486c34..1664ee84 100644 --- a/src/do_gradientai/resources/agents/knowledge_bases.py +++ b/src/gradient/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -166,7 +166,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -175,7 +175,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/routes.py b/src/gradient/resources/agents/routes.py similarity index 99% rename from src/do_gradientai/resources/agents/routes.py rename to src/gradient/resources/agents/routes.py index a7a298f2..1007b08f 100644 --- a/src/do_gradientai/resources/agents/routes.py +++ b/src/gradient/resources/agents/routes.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> RoutesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return RoutesResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> RoutesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return RoutesResourceWithStreamingResponse(self) @@ -257,7 +257,7 @@ def with_raw_response(self) -> AsyncRoutesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncRoutesResourceWithRawResponse(self) @@ -266,7 +266,7 @@ def with_streaming_response(self) -> AsyncRoutesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncRoutesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/agents/versions.py b/src/gradient/resources/agents/versions.py similarity index 98% rename from src/do_gradientai/resources/agents/versions.py rename to src/gradient/resources/agents/versions.py index 77eabea9..bc56e032 100644 --- a/src/do_gradientai/resources/agents/versions.py +++ b/src/gradient/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -151,7 +151,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -160,7 +160,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/chat/__init__.py b/src/gradient/resources/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/chat/__init__.py rename to src/gradient/resources/chat/__init__.py diff --git a/src/do_gradientai/resources/chat/chat.py b/src/gradient/resources/chat/chat.py similarity index 93% rename from src/do_gradientai/resources/chat/chat.py rename to src/gradient/resources/chat/chat.py index 6fa2925d..ac933129 100644 --- a/src/do_gradientai/resources/chat/chat.py +++ b/src/gradient/resources/chat/chat.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/chat/completions.py b/src/gradient/resources/chat/completions.py similarity index 99% rename from src/do_gradientai/resources/chat/completions.py rename to src/gradient/resources/chat/completions.py index a0545173..4dc98fa5 100644 --- a/src/do_gradientai/resources/chat/completions.py +++ b/src/gradient/resources/chat/completions.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -506,7 +506,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -515,7 +515,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/databases/__init__.py b/src/gradient/resources/databases/__init__.py similarity index 100% rename from src/do_gradientai/resources/databases/__init__.py rename to src/gradient/resources/databases/__init__.py diff --git a/src/do_gradientai/resources/databases/databases.py b/src/gradient/resources/databases/databases.py similarity index 94% rename from src/do_gradientai/resources/databases/databases.py rename to src/gradient/resources/databases/databases.py index e1f990d5..120ab91f 100644 --- a/src/do_gradientai/resources/databases/databases.py +++ b/src/gradient/resources/databases/databases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> DatabasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
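The docstrings being touched throughout these hunks describe the two Stainless-generated response accessors that every resource class exposes. A minimal usage sketch against the renamed chat completions resource; the `Gradient` client class, the model slug, and the `create` parameters are assumptions based on the README sections the updated URLs point at, not part of this patch:

from gradient import Gradient

client = Gradient()

# `.with_raw_response` prefixes the method call and returns the raw HTTP
# response (headers included) instead of the parsed model.
raw = client.chat.completions.with_raw_response.create(
    model="llama3.3-70b-instruct",  # assumed model slug, for illustration only
    messages=[{"role": "user", "content": "Hello"}],
)
print(raw.headers.get("x-request-id"))
completion = raw.parse()  # parse into the typed model only when needed

# `.with_streaming_response` defers reading the body until it is iterated,
# so the response is not loaded eagerly into memory.
with client.chat.completions.with_streaming_response.create(
    model="llama3.3-70b-instruct",
    messages=[{"role": "user", "content": "Hello"}],
) as response:
    for line in response.iter_lines():
        print(line)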
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return DatabasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> DatabasesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return DatabasesResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncDatabasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncDatabasesResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncDatabasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncDatabasesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/databases/schema_registry/__init__.py b/src/gradient/resources/databases/schema_registry/__init__.py similarity index 100% rename from src/do_gradientai/resources/databases/schema_registry/__init__.py rename to src/gradient/resources/databases/schema_registry/__init__.py diff --git a/src/do_gradientai/resources/databases/schema_registry/config.py b/src/gradient/resources/databases/schema_registry/config.py similarity index 99% rename from src/do_gradientai/resources/databases/schema_registry/config.py rename to src/gradient/resources/databases/schema_registry/config.py index a815b84e..f9c0d8d0 100644 --- a/src/do_gradientai/resources/databases/schema_registry/config.py +++ b/src/gradient/resources/databases/schema_registry/config.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> ConfigResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ConfigResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> ConfigResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ConfigResourceWithStreamingResponse(self) @@ -236,7 +236,7 @@ def with_raw_response(self) -> AsyncConfigResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncConfigResourceWithRawResponse(self) @@ -245,7 +245,7 @@ def with_streaming_response(self) -> AsyncConfigResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncConfigResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/databases/schema_registry/schema_registry.py b/src/gradient/resources/databases/schema_registry/schema_registry.py similarity index 94% rename from src/do_gradientai/resources/databases/schema_registry/schema_registry.py rename to src/gradient/resources/databases/schema_registry/schema_registry.py index 6a0a44fb..dd7d3dbe 100644 --- a/src/do_gradientai/resources/databases/schema_registry/schema_registry.py +++ b/src/gradient/resources/databases/schema_registry/schema_registry.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> SchemaRegistryResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return SchemaRegistryResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> SchemaRegistryResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return SchemaRegistryResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncSchemaRegistryResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncSchemaRegistryResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncSchemaRegistryResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncSchemaRegistryResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/__init__.py b/src/gradient/resources/gpu_droplets/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/__init__.py rename to src/gradient/resources/gpu_droplets/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/account/__init__.py b/src/gradient/resources/gpu_droplets/account/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/account/__init__.py rename to src/gradient/resources/gpu_droplets/account/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/account/account.py b/src/gradient/resources/gpu_droplets/account/account.py similarity index 93% rename from src/do_gradientai/resources/gpu_droplets/account/account.py rename to src/gradient/resources/gpu_droplets/account/account.py index d61fb68b..5bcaf269 100644 --- a/src/do_gradientai/resources/gpu_droplets/account/account.py +++ b/src/gradient/resources/gpu_droplets/account/account.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AccountResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AccountResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AccountResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AccountResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAccountResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAccountResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAccountResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAccountResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/account/keys.py b/src/gradient/resources/gpu_droplets/account/keys.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/account/keys.py rename to src/gradient/resources/gpu_droplets/account/keys.py index 66d3bd55..f5cd4120 100644 --- a/src/do_gradientai/resources/gpu_droplets/account/keys.py +++ b/src/gradient/resources/gpu_droplets/account/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -272,7 +272,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -281,7 +281,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/actions.py b/src/gradient/resources/gpu_droplets/actions.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/actions.py rename to src/gradient/resources/gpu_droplets/actions.py index 197b2ce7..715fb076 100644 --- a/src/do_gradientai/resources/gpu_droplets/actions.py +++ b/src/gradient/resources/gpu_droplets/actions.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> ActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ActionsResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ActionsResourceWithStreamingResponse(self) @@ -1008,7 +1008,7 @@ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncActionsResourceWithRawResponse(self) @@ -1017,7 +1017,7 @@ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncActionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/autoscale.py b/src/gradient/resources/gpu_droplets/autoscale.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/autoscale.py rename to src/gradient/resources/gpu_droplets/autoscale.py index a1a72430..342256f6 100644 --- a/src/do_gradientai/resources/gpu_droplets/autoscale.py +++ b/src/gradient/resources/gpu_droplets/autoscale.py @@ -40,7 +40,7 @@ def with_raw_response(self) -> AutoscaleResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AutoscaleResourceWithRawResponse(self) @@ -49,7 +49,7 @@ def with_streaming_response(self) -> AutoscaleResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AutoscaleResourceWithStreamingResponse(self) @@ -447,7 +447,7 @@ def with_raw_response(self) -> AsyncAutoscaleResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAutoscaleResourceWithRawResponse(self) @@ -456,7 +456,7 @@ def with_streaming_response(self) -> AsyncAutoscaleResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAutoscaleResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/backups.py b/src/gradient/resources/gpu_droplets/backups.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/backups.py rename to src/gradient/resources/gpu_droplets/backups.py index 06fca19e..9f20a047 100644 --- a/src/do_gradientai/resources/gpu_droplets/backups.py +++ b/src/gradient/resources/gpu_droplets/backups.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> BackupsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return BackupsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> BackupsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return BackupsResourceWithStreamingResponse(self) @@ -213,7 +213,7 @@ def with_raw_response(self) -> AsyncBackupsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncBackupsResourceWithRawResponse(self) @@ -222,7 +222,7 @@ def with_streaming_response(self) -> AsyncBackupsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncBackupsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py rename to src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py index 46db6563..2f3b90cf 100644 --- a/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py +++ b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DestroyWithAssociatedResourcesResourceWithRawResp This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return DestroyWithAssociatedResourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DestroyWithAssociatedResourcesResourceWithS """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return DestroyWithAssociatedResourcesResourceWithStreamingResponse(self) @@ -291,7 +291,7 @@ def with_raw_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRa This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(self) @@ -300,7 +300,7 @@ def with_streaming_response(self) -> AsyncDestroyWithAssociatedResourcesResource """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/gradient/resources/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py rename to src/gradient/resources/gpu_droplets/firewalls/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/gradient/resources/gpu_droplets/firewalls/droplets.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py rename to src/gradient/resources/gpu_droplets/firewalls/droplets.py index 025d1ba4..b25aa3e3 100644 --- a/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py +++ b/src/gradient/resources/gpu_droplets/firewalls/droplets.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> DropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return DropletsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return DropletsResourceWithStreamingResponse(self) @@ -142,7 +142,7 @@ def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncDropletsResourceWithRawResponse(self) @@ -151,7 +151,7 @@ def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncDropletsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py rename to src/gradient/resources/gpu_droplets/firewalls/firewalls.py index a6c21928..116cde8d 100644 --- a/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py +++ b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py @@ -68,7 +68,7 @@ def with_raw_response(self) -> FirewallsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return FirewallsResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return FirewallsResourceWithStreamingResponse(self) @@ -301,7 +301,7 @@ def with_raw_response(self) -> AsyncFirewallsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncFirewallsResourceWithRawResponse(self) @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncFirewallsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py b/src/gradient/resources/gpu_droplets/firewalls/rules.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/firewalls/rules.py rename to src/gradient/resources/gpu_droplets/firewalls/rules.py index 61026779..d3a77cd9 100644 --- a/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py +++ b/src/gradient/resources/gpu_droplets/firewalls/rules.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> RulesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return RulesResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> RulesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return RulesResourceWithStreamingResponse(self) @@ -154,7 +154,7 @@ def with_raw_response(self) -> AsyncRulesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncRulesResourceWithRawResponse(self) @@ -163,7 +163,7 @@ def with_streaming_response(self) -> AsyncRulesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncRulesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py b/src/gradient/resources/gpu_droplets/firewalls/tags.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/firewalls/tags.py rename to src/gradient/resources/gpu_droplets/firewalls/tags.py index 725bc014..dc66c72f 100644 --- a/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py +++ b/src/gradient/resources/gpu_droplets/firewalls/tags.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> TagsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return TagsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> TagsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return TagsResourceWithStreamingResponse(self) @@ -148,7 +148,7 @@ def with_raw_response(self) -> AsyncTagsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncTagsResourceWithRawResponse(self) @@ -157,7 +157,7 @@ def with_streaming_response(self) -> AsyncTagsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncTagsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/gradient/resources/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py rename to src/gradient/resources/gpu_droplets/floating_ips/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/gradient/resources/gpu_droplets/floating_ips/actions.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py rename to src/gradient/resources/gpu_droplets/floating_ips/actions.py index 7ba3899d..ecf88993 100644 --- a/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py +++ b/src/gradient/resources/gpu_droplets/floating_ips/actions.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> ActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ActionsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ActionsResourceWithStreamingResponse(self) @@ -234,7 +234,7 @@ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncActionsResourceWithRawResponse(self) @@ -243,7 +243,7 @@ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncActionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py rename to src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py index cabe012e..f70f153f 100644 --- a/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py +++ b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py @@ -44,7 +44,7 @@ def with_raw_response(self) -> FloatingIPsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return FloatingIPsResourceWithRawResponse(self) @@ -53,7 +53,7 @@ def with_streaming_response(self) -> FloatingIPsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return FloatingIPsResourceWithStreamingResponse(self) @@ -301,7 +301,7 @@ def with_raw_response(self) -> AsyncFloatingIPsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncFloatingIPsResourceWithRawResponse(self) @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncFloatingIPsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncFloatingIPsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py b/src/gradient/resources/gpu_droplets/gpu_droplets.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/gpu_droplets.py rename to src/gradient/resources/gpu_droplets/gpu_droplets.py index cbb07830..0ce55ba8 100644 --- a/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py +++ b/src/gradient/resources/gpu_droplets/gpu_droplets.py @@ -189,7 +189,7 @@ def with_raw_response(self) -> GPUDropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return GPUDropletsResourceWithRawResponse(self) @@ -198,7 +198,7 @@ def with_streaming_response(self) -> GPUDropletsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return GPUDropletsResourceWithStreamingResponse(self) @@ -960,7 +960,7 @@ def with_raw_response(self) -> AsyncGPUDropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncGPUDropletsResourceWithRawResponse(self) @@ -969,7 +969,7 @@ def with_streaming_response(self) -> AsyncGPUDropletsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncGPUDropletsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/images/__init__.py b/src/gradient/resources/gpu_droplets/images/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/images/__init__.py rename to src/gradient/resources/gpu_droplets/images/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/images/actions.py b/src/gradient/resources/gpu_droplets/images/actions.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/images/actions.py rename to src/gradient/resources/gpu_droplets/images/actions.py index 9428418b..287558ca 100644 --- a/src/do_gradientai/resources/gpu_droplets/images/actions.py +++ b/src/gradient/resources/gpu_droplets/images/actions.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ActionsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ActionsResourceWithStreamingResponse(self) @@ -269,7 +269,7 @@ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncActionsResourceWithRawResponse(self) @@ -278,7 +278,7 @@ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncActionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/images/images.py b/src/gradient/resources/gpu_droplets/images/images.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/images/images.py rename to src/gradient/resources/gpu_droplets/images/images.py index 2c70e793..09994263 100644 --- a/src/do_gradientai/resources/gpu_droplets/images/images.py +++ b/src/gradient/resources/gpu_droplets/images/images.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> ImagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ImagesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ImagesResourceWithStreamingResponse(self) @@ -412,7 +412,7 @@ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncImagesResourceWithRawResponse(self) @@ -421,7 +421,7 @@ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncImagesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/gradient/resources/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py rename to src/gradient/resources/gpu_droplets/load_balancers/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py rename to src/gradient/resources/gpu_droplets/load_balancers/droplets.py index 2553a729..4d8eb4c5 100644 --- a/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> DropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return DropletsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return DropletsResourceWithStreamingResponse(self) @@ -145,7 +145,7 @@ def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncDropletsResourceWithRawResponse(self) @@ -154,7 +154,7 @@ def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncDropletsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py rename to src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py index 2ba20f88..c4be65e7 100644 --- a/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ForwardingRulesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ForwardingRulesResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ForwardingRulesResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ForwardingRulesResourceWithStreamingResponse(self) @@ -145,7 +145,7 @@ def with_raw_response(self) -> AsyncForwardingRulesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncForwardingRulesResourceWithRawResponse(self) @@ -154,7 +154,7 @@ def with_streaming_response(self) -> AsyncForwardingRulesResourceWithStreamingRe """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncForwardingRulesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py rename to src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py index c724b6d9..d876b50f 100644 --- a/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py @@ -68,7 +68,7 @@ def with_raw_response(self) -> LoadBalancersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return LoadBalancersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> LoadBalancersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return LoadBalancersResourceWithStreamingResponse(self) @@ -1080,7 +1080,7 @@ def with_raw_response(self) -> AsyncLoadBalancersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncLoadBalancersResourceWithRawResponse(self) @@ -1089,7 +1089,7 @@ def with_streaming_response(self) -> AsyncLoadBalancersResourceWithStreamingResp """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncLoadBalancersResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/sizes.py b/src/gradient/resources/gpu_droplets/sizes.py similarity index 96% rename from src/do_gradientai/resources/gpu_droplets/sizes.py rename to src/gradient/resources/gpu_droplets/sizes.py index e37116c7..7cfc5629 100644 --- a/src/do_gradientai/resources/gpu_droplets/sizes.py +++ b/src/gradient/resources/gpu_droplets/sizes.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> SizesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return SizesResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> SizesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return SizesResourceWithStreamingResponse(self) @@ -99,7 +99,7 @@ def with_raw_response(self) -> AsyncSizesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncSizesResourceWithRawResponse(self) @@ -108,7 +108,7 @@ def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncSizesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/snapshots.py b/src/gradient/resources/gpu_droplets/snapshots.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/snapshots.py rename to src/gradient/resources/gpu_droplets/snapshots.py index 081ab5b8..eed93cfd 100644 --- a/src/do_gradientai/resources/gpu_droplets/snapshots.py +++ b/src/gradient/resources/gpu_droplets/snapshots.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> SnapshotsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return SnapshotsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return SnapshotsResourceWithStreamingResponse(self) @@ -202,7 +202,7 @@ def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncSnapshotsResourceWithRawResponse(self) @@ -211,7 +211,7 @@ def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncSnapshotsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/__init__.py b/src/gradient/resources/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/do_gradientai/resources/gpu_droplets/volumes/__init__.py rename to src/gradient/resources/gpu_droplets/volumes/__init__.py diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/actions.py b/src/gradient/resources/gpu_droplets/volumes/actions.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/volumes/actions.py rename to src/gradient/resources/gpu_droplets/volumes/actions.py index 9d925397..2e093136 100644 --- a/src/do_gradientai/resources/gpu_droplets/volumes/actions.py +++ b/src/gradient/resources/gpu_droplets/volumes/actions.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ActionsResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ActionsResourceWithStreamingResponse(self) @@ -764,7 +764,7 @@ def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncActionsResourceWithRawResponse(self) @@ -773,7 +773,7 @@ def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncActionsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/gradient/resources/gpu_droplets/volumes/snapshots.py similarity index 98% rename from src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py rename to src/gradient/resources/gpu_droplets/volumes/snapshots.py index 766d9a3a..0f9e30fa 100644 --- a/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py +++ b/src/gradient/resources/gpu_droplets/volumes/snapshots.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> SnapshotsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return SnapshotsResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return SnapshotsResourceWithStreamingResponse(self) @@ -233,7 +233,7 @@ def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncSnapshotsResourceWithRawResponse(self) @@ -242,7 +242,7 @@ def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncSnapshotsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py b/src/gradient/resources/gpu_droplets/volumes/volumes.py similarity index 99% rename from src/do_gradientai/resources/gpu_droplets/volumes/volumes.py rename to src/gradient/resources/gpu_droplets/volumes/volumes.py index efd1d4ae..ada4aedf 100644 --- a/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py +++ b/src/gradient/resources/gpu_droplets/volumes/volumes.py @@ -57,7 +57,7 @@ def with_raw_response(self) -> VolumesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return VolumesResourceWithRawResponse(self) @@ -66,7 +66,7 @@ def with_streaming_response(self) -> VolumesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return VolumesResourceWithStreamingResponse(self) @@ -550,7 +550,7 @@ def with_raw_response(self) -> AsyncVolumesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncVolumesResourceWithRawResponse(self) @@ -559,7 +559,7 @@ def with_streaming_response(self) -> AsyncVolumesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncVolumesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/gradient/resources/inference/__init__.py similarity index 100% rename from src/do_gradientai/resources/inference/__init__.py rename to src/gradient/resources/inference/__init__.py diff --git a/src/do_gradientai/resources/inference/api_keys.py b/src/gradient/resources/inference/api_keys.py similarity index 99% rename from src/do_gradientai/resources/inference/api_keys.py rename to src/gradient/resources/inference/api_keys.py index 238ef6f6..fa7f86dc 100644 --- a/src/do_gradientai/resources/inference/api_keys.py +++ b/src/gradient/resources/inference/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -258,7 +258,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -267,7 +267,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/inference/inference.py b/src/gradient/resources/inference/inference.py similarity index 94% rename from src/do_gradientai/resources/inference/inference.py rename to src/gradient/resources/inference/inference.py index a144bae0..d22543b3 100644 --- a/src/do_gradientai/resources/inference/inference.py +++ b/src/gradient/resources/inference/inference.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> InferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return InferenceResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return InferenceResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncInferenceResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncInferenceResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/gradient/resources/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/__init__.py rename to src/gradient/resources/knowledge_bases/__init__.py diff --git a/src/do_gradientai/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py similarity index 98% rename from src/do_gradientai/resources/knowledge_bases/data_sources.py rename to src/gradient/resources/knowledge_bases/data_sources.py index 8357dfda..16252324 100644 --- a/src/do_gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradient/resources/knowledge_bases/data_sources.py @@ -36,7 +36,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -45,7 +45,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -211,7 +211,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -220,7 +220,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py similarity index 99% rename from src/do_gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/gradient/resources/knowledge_bases/indexing_jobs.py index 891acd0b..723b42f5 100644 --- a/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py @@ -38,7 +38,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -47,7 +47,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -269,7 +269,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -278,7 +278,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py similarity index 99% rename from src/do_gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/gradient/resources/knowledge_bases/knowledge_bases.py index c181295c..594b2ba7 100644 --- a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py @@ -58,7 +58,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -67,7 +67,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -346,7 +346,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -355,7 +355,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/models/__init__.py b/src/gradient/resources/models/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/__init__.py rename to src/gradient/resources/models/__init__.py diff --git a/src/do_gradientai/resources/models/models.py b/src/gradient/resources/models/models.py similarity index 97% rename from src/do_gradientai/resources/models/models.py rename to src/gradient/resources/models/models.py index 3800c03c..ffb5fea9 100644 --- a/src/do_gradientai/resources/models/models.py +++ b/src/gradient/resources/models/models.py @@ -43,7 +43,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -52,7 +52,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -144,7 +144,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -153,7 +153,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/do_gradientai/resources/models/providers/__init__.py b/src/gradient/resources/models/providers/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/providers/__init__.py rename to src/gradient/resources/models/providers/__init__.py diff --git a/src/do_gradientai/resources/models/providers/anthropic.py b/src/gradient/resources/models/providers/anthropic.py similarity index 99% rename from src/do_gradientai/resources/models/providers/anthropic.py rename to src/gradient/resources/models/providers/anthropic.py index e570be51..ddb0eef8 100644 --- a/src/do_gradientai/resources/models/providers/anthropic.py +++ b/src/gradient/resources/models/providers/anthropic.py @@ -38,7 +38,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -47,7 +47,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -330,7 +330,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -339,7 +339,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return AsyncAnthropicResourceWithStreamingResponse(self)
diff --git a/src/do_gradientai/resources/models/providers/openai.py b/src/gradient/resources/models/providers/openai.py
similarity index 99%
rename from src/do_gradientai/resources/models/providers/openai.py
rename to src/gradient/resources/models/providers/openai.py
index ccd594b8..166e284d 100644
--- a/src/do_gradientai/resources/models/providers/openai.py
+++ b/src/gradient/resources/models/providers/openai.py
@@ -38,7 +38,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return OpenAIResourceWithRawResponse(self)

@@ -47,7 +47,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return OpenAIResourceWithStreamingResponse(self)

@@ -328,7 +328,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return AsyncOpenAIResourceWithRawResponse(self)

@@ -337,7 +337,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return AsyncOpenAIResourceWithStreamingResponse(self)
diff --git a/src/do_gradientai/resources/models/providers/providers.py b/src/gradient/resources/models/providers/providers.py
similarity index 95%
rename from src/do_gradientai/resources/models/providers/providers.py
rename to src/gradient/resources/models/providers/providers.py
index 3e3f4dde..efb71ec5 100644
--- a/src/do_gradientai/resources/models/providers/providers.py
+++ b/src/gradient/resources/models/providers/providers.py
@@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return ProvidersResourceWithRawResponse(self)

@@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return ProvidersResourceWithStreamingResponse(self)

@@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return AsyncProvidersResourceWithRawResponse(self)

@@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return AsyncProvidersResourceWithStreamingResponse(self)
diff --git a/src/do_gradientai/resources/regions.py b/src/gradient/resources/regions.py
similarity index 96%
rename from src/do_gradientai/resources/regions.py
rename to src/gradient/resources/regions.py
index e953e4f3..779bd4dd 100644
--- a/src/do_gradientai/resources/regions.py
+++ b/src/gradient/resources/regions.py
@@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return RegionsResourceWithRawResponse(self)

@@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return RegionsResourceWithStreamingResponse(self)

@@ -98,7 +98,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
         This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
         """
         return AsyncRegionsResourceWithRawResponse(self)

@@ -107,7 +107,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
         """
         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
         """
         return AsyncRegionsResourceWithStreamingResponse(self)
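The docstring hunks above only swap the repository URL (gradientai-python to gradient-python); the two helpers they document behave the same on either side of the rename. As a minimal sketch of the pattern those docstrings describe, assuming the post-rename layout from this patch series (package `gradient`, client class `Gradient`), credentials picked up from the environment, and a placeholder model name:

    from gradient import Gradient

    client = Gradient()  # assumes auth is read from environment variables

    # Prefixing a call with .with_raw_response returns the raw HTTP response;
    # .parse() then yields the usual parsed object.
    raw = client.chat.completions.with_raw_response.create(
        model="example-model",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(raw.headers.get("content-type"))
    completion = raw.parse()

    # .with_streaming_response instead defers reading the body until it is
    # iterated, so it is used as a context manager to ensure the response
    # is closed.
    with client.chat.completions.with_streaming_response.create(
        model="example-model",
        messages=[{"role": "user", "content": "Hello"}],
    ) as response:
        for line in response.iter_lines():
            print(line)

The remainder of this patch is the mechanical package rename, `src/do_gradientai/` to `src/gradient/`, recorded by git as 100%-similarity renames: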
diff --git a/src/do_gradientai/types/__init__.py b/src/gradient/types/__init__.py
similarity index 100%
rename from src/do_gradientai/types/__init__.py
rename to src/gradient/types/__init__.py
diff --git a/src/do_gradientai/types/agent_create_params.py b/src/gradient/types/agent_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agent_create_params.py
rename to src/gradient/types/agent_create_params.py
diff --git a/src/do_gradientai/types/agent_create_response.py b/src/gradient/types/agent_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_create_response.py
rename to src/gradient/types/agent_create_response.py
diff --git a/src/do_gradientai/types/agent_delete_response.py b/src/gradient/types/agent_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_delete_response.py
rename to src/gradient/types/agent_delete_response.py
diff --git a/src/do_gradientai/types/agent_list_params.py b/src/gradient/types/agent_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agent_list_params.py
rename to src/gradient/types/agent_list_params.py
diff --git a/src/do_gradientai/types/agent_list_response.py b/src/gradient/types/agent_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_list_response.py
rename to src/gradient/types/agent_list_response.py
diff --git a/src/do_gradientai/types/agent_retrieve_response.py b/src/gradient/types/agent_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_retrieve_response.py
rename to src/gradient/types/agent_retrieve_response.py
diff --git a/src/do_gradientai/types/agent_update_params.py b/src/gradient/types/agent_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_params.py
rename to src/gradient/types/agent_update_params.py
diff --git a/src/do_gradientai/types/agent_update_response.py b/src/gradient/types/agent_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_response.py
rename to src/gradient/types/agent_update_response.py
diff --git a/src/do_gradientai/types/agent_update_status_params.py b/src/gradient/types/agent_update_status_params.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_status_params.py
rename to src/gradient/types/agent_update_status_params.py
diff --git a/src/do_gradientai/types/agent_update_status_response.py b/src/gradient/types/agent_update_status_response.py
similarity index 100%
rename from src/do_gradientai/types/agent_update_status_response.py
rename to src/gradient/types/agent_update_status_response.py
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/gradient/types/agents/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/__init__.py
rename to src/gradient/types/agents/__init__.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_metric.py b/src/gradient/types/agents/api_evaluation_metric.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_metric.py
rename to src/gradient/types/agents/api_evaluation_metric.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_metric_result.py b/src/gradient/types/agents/api_evaluation_metric_result.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_metric_result.py
rename to src/gradient/types/agents/api_evaluation_metric_result.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_prompt.py b/src/gradient/types/agents/api_evaluation_prompt.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_prompt.py
rename to src/gradient/types/agents/api_evaluation_prompt.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py b/src/gradient/types/agents/api_evaluation_run.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_run.py
rename to src/gradient/types/agents/api_evaluation_run.py
diff --git a/src/do_gradientai/types/agents/api_evaluation_test_case.py b/src/gradient/types/agents/api_evaluation_test_case.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_evaluation_test_case.py
rename to src/gradient/types/agents/api_evaluation_test_case.py
diff --git a/src/do_gradientai/types/agents/api_key_create_params.py b/src/gradient/types/agents/api_key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_create_params.py
rename to src/gradient/types/agents/api_key_create_params.py
diff --git a/src/do_gradientai/types/agents/api_key_create_response.py b/src/gradient/types/agents/api_key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_create_response.py
rename to src/gradient/types/agents/api_key_create_response.py
diff --git a/src/do_gradientai/types/agents/api_key_delete_response.py b/src/gradient/types/agents/api_key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_delete_response.py
rename to src/gradient/types/agents/api_key_delete_response.py
diff --git a/src/do_gradientai/types/agents/api_key_list_params.py b/src/gradient/types/agents/api_key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_list_params.py
rename to src/gradient/types/agents/api_key_list_params.py
diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/gradient/types/agents/api_key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_list_response.py
rename to src/gradient/types/agents/api_key_list_response.py
diff --git a/src/do_gradientai/types/agents/api_key_regenerate_response.py b/src/gradient/types/agents/api_key_regenerate_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_regenerate_response.py
rename to src/gradient/types/agents/api_key_regenerate_response.py
diff --git a/src/do_gradientai/types/agents/api_key_update_params.py b/src/gradient/types/agents/api_key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_update_params.py
rename to src/gradient/types/agents/api_key_update_params.py
diff --git a/src/do_gradientai/types/agents/api_key_update_response.py b/src/gradient/types/agents/api_key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_key_update_response.py
rename to src/gradient/types/agents/api_key_update_response.py
diff --git a/src/do_gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradient/types/agents/api_link_knowledge_base_output.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_link_knowledge_base_output.py
rename to src/gradient/types/agents/api_link_knowledge_base_output.py
diff --git a/src/do_gradientai/types/agents/api_star_metric.py b/src/gradient/types/agents/api_star_metric.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_star_metric.py
rename to src/gradient/types/agents/api_star_metric.py
diff --git a/src/do_gradientai/types/agents/api_star_metric_param.py b/src/gradient/types/agents/api_star_metric_param.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_star_metric_param.py
rename to src/gradient/types/agents/api_star_metric_param.py
diff --git a/src/do_gradientai/types/agents/chat/__init__.py b/src/gradient/types/agents/chat/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/__init__.py
rename to src/gradient/types/agents/chat/__init__.py
diff --git a/src/do_gradientai/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/completion_create_params.py
rename to src/gradient/types/agents/chat/completion_create_params.py
diff --git a/src/do_gradientai/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/chat/completion_create_response.py
rename to src/gradient/types/agents/chat/completion_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
rename to src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
rename to src/gradient/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_params.py b/src/gradient/types/agents/evaluation_dataset_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_params.py
rename to src/gradient/types/agents/evaluation_dataset_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_response.py b/src/gradient/types/agents/evaluation_dataset_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_dataset_create_response.py
rename to src/gradient/types/agents/evaluation_dataset_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradient/types/agents/evaluation_metric_list_regions_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py
rename to src/gradient/types/agents/evaluation_metric_list_regions_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradient/types/agents/evaluation_metric_list_regions_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py
rename to src/gradient/types/agents/evaluation_metric_list_regions_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_response.py b/src/gradient/types/agents/evaluation_metric_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metric_list_response.py
rename to src/gradient/types/agents/evaluation_metric_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/__init__.py
rename to src/gradient/types/agents/evaluation_metrics/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py b/src/gradient/types/agents/evaluation_metrics/anthropic/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_delete_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py
rename to src/gradient/types/agents/evaluation_metrics/anthropic/key_update_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/gradient/types/agents/evaluation_metrics/model_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py
rename to src/gradient/types/agents/evaluation_metrics/model_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradient/types/agents/evaluation_metrics/model_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
rename to src/gradient/types/agents/evaluation_metrics/model_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py b/src/gradient/types/agents/evaluation_metrics/openai/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py
rename to src/gradient/types/agents/evaluation_metrics/openai/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_delete_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_list_agents_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py b/src/gradient/types/agents/evaluation_metrics/openai/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py b/src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py
rename to src/gradient/types/agents/evaluation_metrics/openai/key_update_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_delete_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradient/types/agents/evaluation_metrics/workspace_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/gradient/types/agents/evaluation_metrics/workspace_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspace_update_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/gradient/types/agents/evaluation_metrics/workspaces/__init__.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
rename to src/gradient/types/agents/evaluation_metrics/workspaces/__init__.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
rename to src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspaces/agent_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
rename to src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
rename to src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/gradient/types/agents/evaluation_run_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_create_params.py
rename to src/gradient/types/agents/evaluation_run_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/gradient/types/agents/evaluation_run_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_create_response.py
rename to src/gradient/types/agents/evaluation_run_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradient/types/agents/evaluation_run_list_results_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_list_results_params.py
rename to src/gradient/types/agents/evaluation_run_list_results_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradient/types/agents/evaluation_run_list_results_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_list_results_response.py
rename to src/gradient/types/agents/evaluation_run_list_results_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py b/src/gradient/types/agents/evaluation_run_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
rename to src/gradient/types/agents/evaluation_run_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/gradient/types/agents/evaluation_run_retrieve_results_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
rename to src/gradient/types/agents/evaluation_run_retrieve_results_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_params.py b/src/gradient/types/agents/evaluation_test_case_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_create_params.py
rename to src/gradient/types/agents/evaluation_test_case_create_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_response.py b/src/gradient/types/agents/evaluation_test_case_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_create_response.py
rename to src/gradient/types/agents/evaluation_test_case_create_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
rename to src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
rename to src/gradient/types/agents/evaluation_test_case_list_evaluation_runs_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradient/types/agents/evaluation_test_case_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_list_response.py
rename to src/gradient/types/agents/evaluation_test_case_list_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/gradient/types/agents/evaluation_test_case_retrieve_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
rename to src/gradient/types/agents/evaluation_test_case_retrieve_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/gradient/types/agents/evaluation_test_case_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
rename to src/gradient/types/agents/evaluation_test_case_retrieve_response.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradient/types/agents/evaluation_test_case_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_update_params.py
rename to src/gradient/types/agents/evaluation_test_case_update_params.py
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_response.py b/src/gradient/types/agents/evaluation_test_case_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/evaluation_test_case_update_response.py
rename to src/gradient/types/agents/evaluation_test_case_update_response.py
diff --git a/src/do_gradientai/types/agents/function_create_params.py b/src/gradient/types/agents/function_create_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_create_params.py
rename to src/gradient/types/agents/function_create_params.py
diff --git a/src/do_gradientai/types/agents/function_create_response.py b/src/gradient/types/agents/function_create_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_create_response.py
rename to src/gradient/types/agents/function_create_response.py
diff --git a/src/do_gradientai/types/agents/function_delete_response.py b/src/gradient/types/agents/function_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_delete_response.py
rename to src/gradient/types/agents/function_delete_response.py
diff --git a/src/do_gradientai/types/agents/function_update_params.py b/src/gradient/types/agents/function_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_update_params.py
rename to src/gradient/types/agents/function_update_params.py
diff --git a/src/do_gradientai/types/agents/function_update_response.py b/src/gradient/types/agents/function_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/function_update_response.py
rename to src/gradient/types/agents/function_update_response.py
diff --git a/src/do_gradientai/types/agents/knowledge_base_detach_response.py b/src/gradient/types/agents/knowledge_base_detach_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/knowledge_base_detach_response.py
rename to src/gradient/types/agents/knowledge_base_detach_response.py
diff --git a/src/do_gradientai/types/agents/route_add_params.py b/src/gradient/types/agents/route_add_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_add_params.py
rename to src/gradient/types/agents/route_add_params.py
diff --git a/src/do_gradientai/types/agents/route_add_response.py b/src/gradient/types/agents/route_add_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_add_response.py
rename to src/gradient/types/agents/route_add_response.py
diff --git a/src/do_gradientai/types/agents/route_delete_response.py b/src/gradient/types/agents/route_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_delete_response.py
rename to src/gradient/types/agents/route_delete_response.py
diff --git a/src/do_gradientai/types/agents/route_update_params.py b/src/gradient/types/agents/route_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_update_params.py
rename to src/gradient/types/agents/route_update_params.py
diff --git a/src/do_gradientai/types/agents/route_update_response.py b/src/gradient/types/agents/route_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_update_response.py
rename to src/gradient/types/agents/route_update_response.py
diff --git a/src/do_gradientai/types/agents/route_view_response.py b/src/gradient/types/agents/route_view_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/route_view_response.py
rename to src/gradient/types/agents/route_view_response.py
diff --git a/src/do_gradientai/types/agents/version_list_params.py b/src/gradient/types/agents/version_list_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_list_params.py
rename to src/gradient/types/agents/version_list_params.py
diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/gradient/types/agents/version_list_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_list_response.py
rename to src/gradient/types/agents/version_list_response.py
diff --git a/src/do_gradientai/types/agents/version_update_params.py b/src/gradient/types/agents/version_update_params.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_update_params.py
rename to src/gradient/types/agents/version_update_params.py
diff --git a/src/do_gradientai/types/agents/version_update_response.py b/src/gradient/types/agents/version_update_response.py
similarity index 100%
rename from src/do_gradientai/types/agents/version_update_response.py
rename to src/gradient/types/agents/version_update_response.py
diff --git a/src/do_gradientai/types/api_agent.py b/src/gradient/types/api_agent.py
similarity index 100%
rename from src/do_gradientai/types/api_agent.py
rename to src/gradient/types/api_agent.py
diff --git a/src/do_gradientai/types/api_agent_api_key_info.py b/src/gradient/types/api_agent_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_agent_api_key_info.py
rename to src/gradient/types/api_agent_api_key_info.py
diff --git a/src/do_gradientai/types/api_agent_model.py b/src/gradient/types/api_agent_model.py
similarity index 100%
rename from src/do_gradientai/types/api_agent_model.py
rename to src/gradient/types/api_agent_model.py
diff --git a/src/do_gradientai/types/api_agreement.py b/src/gradient/types/api_agreement.py
similarity index 100%
rename from src/do_gradientai/types/api_agreement.py
rename to src/gradient/types/api_agreement.py
diff --git a/src/do_gradientai/types/api_anthropic_api_key_info.py b/src/gradient/types/api_anthropic_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_anthropic_api_key_info.py
rename to src/gradient/types/api_anthropic_api_key_info.py
diff --git a/src/do_gradientai/types/api_deployment_visibility.py b/src/gradient/types/api_deployment_visibility.py
similarity index 100%
rename from src/do_gradientai/types/api_deployment_visibility.py
rename to src/gradient/types/api_deployment_visibility.py
diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/gradient/types/api_knowledge_base.py
similarity index 100%
rename from src/do_gradientai/types/api_knowledge_base.py
rename to src/gradient/types/api_knowledge_base.py
diff --git a/src/do_gradientai/types/api_model.py b/src/gradient/types/api_model.py
similarity index 100%
rename from src/do_gradientai/types/api_model.py
rename to src/gradient/types/api_model.py
diff --git a/src/do_gradientai/types/api_model_version.py b/src/gradient/types/api_model_version.py
similarity index 100%
rename from src/do_gradientai/types/api_model_version.py
rename to src/gradient/types/api_model_version.py
diff --git a/src/do_gradientai/types/api_openai_api_key_info.py b/src/gradient/types/api_openai_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/api_openai_api_key_info.py
rename to src/gradient/types/api_openai_api_key_info.py
diff --git a/src/do_gradientai/types/api_retrieval_method.py b/src/gradient/types/api_retrieval_method.py
similarity index 100%
rename from src/do_gradientai/types/api_retrieval_method.py
rename to src/gradient/types/api_retrieval_method.py
diff --git a/src/do_gradientai/types/api_workspace.py b/src/gradient/types/api_workspace.py
similarity index 100%
rename from src/do_gradientai/types/api_workspace.py
rename to src/gradient/types/api_workspace.py
diff --git a/src/do_gradientai/types/chat/__init__.py b/src/gradient/types/chat/__init__.py
similarity index 100%
rename from src/do_gradientai/types/chat/__init__.py
rename to src/gradient/types/chat/__init__.py
diff --git a/src/do_gradientai/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py
similarity index 100%
rename from src/do_gradientai/types/chat/completion_create_params.py
rename to src/gradient/types/chat/completion_create_params.py
diff --git a/src/do_gradientai/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py
similarity index 100%
rename from src/do_gradientai/types/chat/completion_create_response.py
rename to src/gradient/types/chat/completion_create_response.py
diff --git a/src/do_gradientai/types/databases/__init__.py b/src/gradient/types/databases/__init__.py
similarity index 100%
rename from src/do_gradientai/types/databases/__init__.py
rename to src/gradient/types/databases/__init__.py
diff --git a/src/do_gradientai/types/databases/schema_registry/__init__.py b/src/gradient/types/databases/schema_registry/__init__.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/__init__.py
rename to src/gradient/types/databases/schema_registry/__init__.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py b/src/gradient/types/databases/schema_registry/config_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py
rename to src/gradient/types/databases/schema_registry/config_retrieve_response.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py b/src/gradient/types/databases/schema_registry/config_retrieve_subject_response.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py
rename to src/gradient/types/databases/schema_registry/config_retrieve_subject_response.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_params.py b/src/gradient/types/databases/schema_registry/config_update_params.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_update_params.py
rename to src/gradient/types/databases/schema_registry/config_update_params.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_response.py b/src/gradient/types/databases/schema_registry/config_update_response.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_update_response.py
rename to src/gradient/types/databases/schema_registry/config_update_response.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py b/src/gradient/types/databases/schema_registry/config_update_subject_params.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py
rename to src/gradient/types/databases/schema_registry/config_update_subject_params.py
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py b/src/gradient/types/databases/schema_registry/config_update_subject_response.py
similarity index 100%
rename from src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py
rename to src/gradient/types/databases/schema_registry/config_update_subject_response.py
diff --git a/src/do_gradientai/types/droplet_backup_policy.py b/src/gradient/types/droplet_backup_policy.py
similarity index 100%
rename from src/do_gradientai/types/droplet_backup_policy.py
rename to src/gradient/types/droplet_backup_policy.py
diff --git a/src/do_gradientai/types/droplet_backup_policy_param.py b/src/gradient/types/droplet_backup_policy_param.py
similarity index 100%
rename from src/do_gradientai/types/droplet_backup_policy_param.py
rename to src/gradient/types/droplet_backup_policy_param.py
diff --git a/src/do_gradientai/types/gpu_droplet_create_params.py b/src/gradient/types/gpu_droplet_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_create_params.py
rename to src/gradient/types/gpu_droplet_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_create_response.py b/src/gradient/types/gpu_droplet_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_create_response.py
rename to src/gradient/types/gpu_droplet_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/gradient/types/gpu_droplet_delete_by_tag_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py
rename to src/gradient/types/gpu_droplet_delete_by_tag_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_firewalls_params.py b/src/gradient/types/gpu_droplet_list_firewalls_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_firewalls_params.py
rename to src/gradient/types/gpu_droplet_list_firewalls_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_firewalls_response.py b/src/gradient/types/gpu_droplet_list_firewalls_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_firewalls_response.py
rename to src/gradient/types/gpu_droplet_list_firewalls_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_kernels_params.py b/src/gradient/types/gpu_droplet_list_kernels_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_kernels_params.py
rename to src/gradient/types/gpu_droplet_list_kernels_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_kernels_response.py b/src/gradient/types/gpu_droplet_list_kernels_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_kernels_response.py
rename to src/gradient/types/gpu_droplet_list_kernels_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_neighbors_response.py b/src/gradient/types/gpu_droplet_list_neighbors_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_neighbors_response.py
rename to src/gradient/types/gpu_droplet_list_neighbors_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_params.py b/src/gradient/types/gpu_droplet_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_params.py
rename to src/gradient/types/gpu_droplet_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_response.py b/src/gradient/types/gpu_droplet_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_response.py
rename to src/gradient/types/gpu_droplet_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_snapshots_params.py b/src/gradient/types/gpu_droplet_list_snapshots_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_snapshots_params.py
rename to src/gradient/types/gpu_droplet_list_snapshots_params.py
diff --git a/src/do_gradientai/types/gpu_droplet_list_snapshots_response.py b/src/gradient/types/gpu_droplet_list_snapshots_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_list_snapshots_response.py
rename to src/gradient/types/gpu_droplet_list_snapshots_response.py
diff --git a/src/do_gradientai/types/gpu_droplet_retrieve_response.py b/src/gradient/types/gpu_droplet_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplet_retrieve_response.py
rename to src/gradient/types/gpu_droplet_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/__init__.py b/src/gradient/types/gpu_droplets/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/__init__.py
rename to src/gradient/types/gpu_droplets/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/__init__.py b/src/gradient/types/gpu_droplets/account/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/__init__.py
rename to src/gradient/types/gpu_droplets/account/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_create_params.py b/src/gradient/types/gpu_droplets/account/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_create_params.py
rename to src/gradient/types/gpu_droplets/account/key_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_create_response.py b/src/gradient/types/gpu_droplets/account/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_create_response.py
rename to src/gradient/types/gpu_droplets/account/key_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_list_params.py b/src/gradient/types/gpu_droplets/account/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_list_params.py
rename to src/gradient/types/gpu_droplets/account/key_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_list_response.py b/src/gradient/types/gpu_droplets/account/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_list_response.py
rename to src/gradient/types/gpu_droplets/account/key_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py
rename to src/gradient/types/gpu_droplets/account/key_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_update_params.py b/src/gradient/types/gpu_droplets/account/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_update_params.py
rename to src/gradient/types/gpu_droplets/account/key_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/account/key_update_response.py b/src/gradient/types/gpu_droplets/account/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/account/key_update_response.py
rename to src/gradient/types/gpu_droplets/account/key_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/gradient/types/gpu_droplets/action_bulk_initiate_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py
rename to src/gradient/types/gpu_droplets/action_bulk_initiate_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/gradient/types/gpu_droplets/action_bulk_initiate_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py
rename to src/gradient/types/gpu_droplets/action_bulk_initiate_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_initiate_params.py b/src/gradient/types/gpu_droplets/action_initiate_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_initiate_params.py
rename to src/gradient/types/gpu_droplets/action_initiate_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_initiate_response.py b/src/gradient/types/gpu_droplets/action_initiate_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_initiate_response.py
rename to src/gradient/types/gpu_droplets/action_initiate_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_list_params.py b/src/gradient/types/gpu_droplets/action_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_list_params.py
rename to src/gradient/types/gpu_droplets/action_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_list_response.py b/src/gradient/types/gpu_droplets/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_list_response.py
rename to src/gradient/types/gpu_droplets/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/action_retrieve_response.py b/src/gradient/types/gpu_droplets/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/action_retrieve_response.py
rename to src/gradient/types/gpu_droplets/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/associated_resource.py b/src/gradient/types/gpu_droplets/associated_resource.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/associated_resource.py
rename to src/gradient/types/gpu_droplets/associated_resource.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_create_params.py b/src/gradient/types/gpu_droplets/autoscale_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_create_params.py
rename to src/gradient/types/gpu_droplets/autoscale_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_create_response.py b/src/gradient/types/gpu_droplets/autoscale_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_create_response.py
rename to src/gradient/types/gpu_droplets/autoscale_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/gradient/types/gpu_droplets/autoscale_list_history_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py
rename to src/gradient/types/gpu_droplets/autoscale_list_history_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/gradient/types/gpu_droplets/autoscale_list_history_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py
rename to src/gradient/types/gpu_droplets/autoscale_list_history_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/gradient/types/gpu_droplets/autoscale_list_members_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py
rename to src/gradient/types/gpu_droplets/autoscale_list_members_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/gradient/types/gpu_droplets/autoscale_list_members_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py
rename to src/gradient/types/gpu_droplets/autoscale_list_members_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_params.py b/src/gradient/types/gpu_droplets/autoscale_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_params.py
rename to src/gradient/types/gpu_droplets/autoscale_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_list_response.py b/src/gradient/types/gpu_droplets/autoscale_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_list_response.py
rename to src/gradient/types/gpu_droplets/autoscale_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool.py b/src/gradient/types/gpu_droplets/autoscale_pool.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool.py
rename to src/gradient/types/gpu_droplets/autoscale_pool.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_droplet_template.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_dynamic_config_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/gradient/types/gpu_droplets/autoscale_pool_static_config.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_static_config.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_static_config_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py
rename to src/gradient/types/gpu_droplets/autoscale_pool_static_config_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/gradient/types/gpu_droplets/autoscale_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py
rename to src/gradient/types/gpu_droplets/autoscale_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_update_params.py b/src/gradient/types/gpu_droplets/autoscale_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_update_params.py
rename to src/gradient/types/gpu_droplets/autoscale_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/autoscale_update_response.py b/src/gradient/types/gpu_droplets/autoscale_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/autoscale_update_response.py
rename to src/gradient/types/gpu_droplets/autoscale_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_params.py b/src/gradient/types/gpu_droplets/backup_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_params.py
rename to src/gradient/types/gpu_droplets/backup_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/gradient/types/gpu_droplets/backup_list_policies_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py
rename to src/gradient/types/gpu_droplets/backup_list_policies_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/gradient/types/gpu_droplets/backup_list_policies_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py
rename to src/gradient/types/gpu_droplets/backup_list_policies_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_response.py b/src/gradient/types/gpu_droplets/backup_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_response.py
rename to src/gradient/types/gpu_droplets/backup_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/gradient/types/gpu_droplets/backup_list_supported_policies_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py
rename to src/gradient/types/gpu_droplets/backup_list_supported_policies_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/gradient/types/gpu_droplets/backup_retrieve_policy_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py
rename to src/gradient/types/gpu_droplets/backup_retrieve_policy_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/current_utilization.py b/src/gradient/types/gpu_droplets/current_utilization.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/current_utilization.py
rename to src/gradient/types/gpu_droplets/current_utilization.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
rename to src/gradient/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
rename to src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py
rename to src/gradient/types/gpu_droplets/destroy_with_associated_resource_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/gradient/types/gpu_droplets/destroyed_associated_resource.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py
rename to src/gradient/types/gpu_droplets/destroyed_associated_resource.py
diff --git a/src/do_gradientai/types/gpu_droplets/domains.py b/src/gradient/types/gpu_droplets/domains.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/domains.py
rename to src/gradient/types/gpu_droplets/domains.py
diff --git a/src/do_gradientai/types/gpu_droplets/domains_param.py b/src/gradient/types/gpu_droplets/domains_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/domains_param.py
rename to src/gradient/types/gpu_droplets/domains_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall.py b/src/gradient/types/gpu_droplets/firewall.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall.py
rename to src/gradient/types/gpu_droplets/firewall.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_create_params.py b/src/gradient/types/gpu_droplets/firewall_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_create_params.py
rename to src/gradient/types/gpu_droplets/firewall_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_create_response.py b/src/gradient/types/gpu_droplets/firewall_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_create_response.py
rename to src/gradient/types/gpu_droplets/firewall_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_list_params.py b/src/gradient/types/gpu_droplets/firewall_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_list_params.py
rename to src/gradient/types/gpu_droplets/firewall_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_list_response.py b/src/gradient/types/gpu_droplets/firewall_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_list_response.py
rename to src/gradient/types/gpu_droplets/firewall_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_param.py b/src/gradient/types/gpu_droplets/firewall_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_param.py
rename to src/gradient/types/gpu_droplets/firewall_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/gradient/types/gpu_droplets/firewall_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py
rename to src/gradient/types/gpu_droplets/firewall_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_update_params.py b/src/gradient/types/gpu_droplets/firewall_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_update_params.py
rename to src/gradient/types/gpu_droplets/firewall_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewall_update_response.py b/src/gradient/types/gpu_droplets/firewall_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewall_update_response.py
rename to src/gradient/types/gpu_droplets/firewall_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/__init__.py b/src/gradient/types/gpu_droplets/firewalls/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/__init__.py
rename to src/gradient/types/gpu_droplets/firewalls/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/gradient/types/gpu_droplets/firewalls/droplet_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py
rename to src/gradient/types/gpu_droplets/firewalls/droplet_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/droplet_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py
rename to src/gradient/types/gpu_droplets/firewalls/droplet_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/gradient/types/gpu_droplets/firewalls/rule_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py
rename to src/gradient/types/gpu_droplets/firewalls/rule_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/rule_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py
rename to src/gradient/types/gpu_droplets/firewalls/rule_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py
rename to src/gradient/types/gpu_droplets/firewalls/tag_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py
rename to src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip.py b/src/gradient/types/gpu_droplets/floating_ip.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip.py
rename to src/gradient/types/gpu_droplets/floating_ip.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/gradient/types/gpu_droplets/floating_ip_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py
rename to src/gradient/types/gpu_droplets/floating_ip_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/gradient/types/gpu_droplets/floating_ip_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py
rename to src/gradient/types/gpu_droplets/floating_ip_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/gradient/types/gpu_droplets/floating_ip_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py
rename to src/gradient/types/gpu_droplets/floating_ip_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/gradient/types/gpu_droplets/floating_ip_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py
rename to src/gradient/types/gpu_droplets/floating_ip_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/gradient/types/gpu_droplets/floating_ip_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py
rename to src/gradient/types/gpu_droplets/floating_ip_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/gradient/types/gpu_droplets/floating_ips/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py
rename to src/gradient/types/gpu_droplets/floating_ips/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/gradient/types/gpu_droplets/floating_ips/action_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py
rename to src/gradient/types/gpu_droplets/floating_ips/action_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py
rename to src/gradient/types/gpu_droplets/floating_ips/action_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py
rename to src/gradient/types/gpu_droplets/floating_ips/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/gradient/types/gpu_droplets/floating_ips/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py
rename to src/gradient/types/gpu_droplets/floating_ips/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/forwarding_rule.py b/src/gradient/types/gpu_droplets/forwarding_rule.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/forwarding_rule.py
rename to src/gradient/types/gpu_droplets/forwarding_rule.py
diff --git a/src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/gradient/types/gpu_droplets/forwarding_rule_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py
rename to src/gradient/types/gpu_droplets/forwarding_rule_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/glb_settings.py b/src/gradient/types/gpu_droplets/glb_settings.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/glb_settings.py
rename to src/gradient/types/gpu_droplets/glb_settings.py
diff --git a/src/do_gradientai/types/gpu_droplets/glb_settings_param.py b/src/gradient/types/gpu_droplets/glb_settings_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/glb_settings_param.py
rename to src/gradient/types/gpu_droplets/glb_settings_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/health_check.py b/src/gradient/types/gpu_droplets/health_check.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/health_check.py
rename to src/gradient/types/gpu_droplets/health_check.py
diff --git a/src/do_gradientai/types/gpu_droplets/health_check_param.py b/src/gradient/types/gpu_droplets/health_check_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/health_check_param.py
rename to src/gradient/types/gpu_droplets/health_check_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_create_params.py b/src/gradient/types/gpu_droplets/image_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_create_params.py
rename to src/gradient/types/gpu_droplets/image_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_create_response.py b/src/gradient/types/gpu_droplets/image_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_create_response.py
rename to src/gradient/types/gpu_droplets/image_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_list_params.py b/src/gradient/types/gpu_droplets/image_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_list_params.py
rename to src/gradient/types/gpu_droplets/image_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_list_response.py b/src/gradient/types/gpu_droplets/image_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_list_response.py
rename to src/gradient/types/gpu_droplets/image_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_retrieve_response.py b/src/gradient/types/gpu_droplets/image_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_retrieve_response.py
rename to src/gradient/types/gpu_droplets/image_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_update_params.py b/src/gradient/types/gpu_droplets/image_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_update_params.py
rename to src/gradient/types/gpu_droplets/image_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/image_update_response.py b/src/gradient/types/gpu_droplets/image_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/image_update_response.py
rename to src/gradient/types/gpu_droplets/image_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/__init__.py b/src/gradient/types/gpu_droplets/images/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/__init__.py
rename to src/gradient/types/gpu_droplets/images/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/action_create_params.py b/src/gradient/types/gpu_droplets/images/action_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/action_create_params.py
rename to src/gradient/types/gpu_droplets/images/action_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/images/action_list_response.py b/src/gradient/types/gpu_droplets/images/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/images/action_list_response.py
rename to src/gradient/types/gpu_droplets/images/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/lb_firewall.py b/src/gradient/types/gpu_droplets/lb_firewall.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/lb_firewall.py
rename to src/gradient/types/gpu_droplets/lb_firewall.py
diff --git a/src/do_gradientai/types/gpu_droplets/lb_firewall_param.py b/src/gradient/types/gpu_droplets/lb_firewall_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/lb_firewall_param.py
rename to src/gradient/types/gpu_droplets/lb_firewall_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer.py b/src/gradient/types/gpu_droplets/load_balancer.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer.py
rename to src/gradient/types/gpu_droplets/load_balancer.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/gradient/types/gpu_droplets/load_balancer_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py
rename to src/gradient/types/gpu_droplets/load_balancer_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/gradient/types/gpu_droplets/load_balancer_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py
rename to src/gradient/types/gpu_droplets/load_balancer_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/gradient/types/gpu_droplets/load_balancer_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py
rename to src/gradient/types/gpu_droplets/load_balancer_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/gradient/types/gpu_droplets/load_balancer_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py
rename to src/gradient/types/gpu_droplets/load_balancer_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/gradient/types/gpu_droplets/load_balancer_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py
rename to src/gradient/types/gpu_droplets/load_balancer_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/gradient/types/gpu_droplets/load_balancer_update_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py
rename to src/gradient/types/gpu_droplets/load_balancer_update_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/gradient/types/gpu_droplets/load_balancer_update_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py
rename to src/gradient/types/gpu_droplets/load_balancer_update_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/gradient/types/gpu_droplets/load_balancers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py
rename to src/gradient/types/gpu_droplets/load_balancers/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/gradient/types/gpu_droplets/load_balancers/droplet_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py
rename to src/gradient/types/gpu_droplets/load_balancers/droplet_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/gradient/types/gpu_droplets/load_balancers/droplet_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py
rename to src/gradient/types/gpu_droplets/load_balancers/droplet_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
rename to src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
rename to src/gradient/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/size_list_params.py b/src/gradient/types/gpu_droplets/size_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/size_list_params.py
rename to src/gradient/types/gpu_droplets/size_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/size_list_response.py b/src/gradient/types/gpu_droplets/size_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/size_list_response.py
rename to src/gradient/types/gpu_droplets/size_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_list_params.py b/src/gradient/types/gpu_droplets/snapshot_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_list_params.py
rename to src/gradient/types/gpu_droplets/snapshot_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_list_response.py b/src/gradient/types/gpu_droplets/snapshot_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_list_response.py
rename to src/gradient/types/gpu_droplets/snapshot_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/gradient/types/gpu_droplets/snapshot_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py
rename to src/gradient/types/gpu_droplets/snapshot_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/sticky_sessions.py b/src/gradient/types/gpu_droplets/sticky_sessions.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/sticky_sessions.py
rename to src/gradient/types/gpu_droplets/sticky_sessions.py
diff --git a/src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/gradient/types/gpu_droplets/sticky_sessions_param.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py
rename to src/gradient/types/gpu_droplets/sticky_sessions_param.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_create_params.py b/src/gradient/types/gpu_droplets/volume_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_create_params.py
rename to src/gradient/types/gpu_droplets/volume_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_create_response.py b/src/gradient/types/gpu_droplets/volume_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_create_response.py
rename to src/gradient/types/gpu_droplets/volume_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/gradient/types/gpu_droplets/volume_delete_by_name_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py
rename to src/gradient/types/gpu_droplets/volume_delete_by_name_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_list_params.py b/src/gradient/types/gpu_droplets/volume_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_list_params.py
rename to src/gradient/types/gpu_droplets/volume_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_list_response.py b/src/gradient/types/gpu_droplets/volume_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_list_response.py
rename to src/gradient/types/gpu_droplets/volume_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/gradient/types/gpu_droplets/volume_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py
rename to src/gradient/types/gpu_droplets/volume_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/__init__.py b/src/gradient/types/gpu_droplets/volumes/__init__.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/__init__.py
rename to src/gradient/types/gpu_droplets/volumes/__init__.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py
rename to src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py
rename to src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py
rename to src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py
rename to src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/gradient/types/gpu_droplets/volumes/action_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py
rename to src/gradient/types/gpu_droplets/volumes/action_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/gradient/types/gpu_droplets/volumes/action_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py
rename to src/gradient/types/gpu_droplets/volumes/action_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/gradient/types/gpu_droplets/volumes/action_retrieve_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py
rename to src/gradient/types/gpu_droplets/volumes/action_retrieve_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/gradient/types/gpu_droplets/volumes/action_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py
rename to src/gradient/types/gpu_droplets/volumes/action_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py
rename to src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_create_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py
rename to src/gradient/types/gpu_droplets/volumes/snapshot_create_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/gradient/types/gpu_droplets/volumes/snapshot_list_params.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py
rename to src/gradient/types/gpu_droplets/volumes/snapshot_list_params.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_list_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py
rename to src/gradient/types/gpu_droplets/volumes/snapshot_list_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/gradient/types/gpu_droplets/volumes/snapshot_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py
rename to src/gradient/types/gpu_droplets/volumes/snapshot_retrieve_response.py
diff --git a/src/do_gradientai/types/gpu_droplets/volumes/volume_action.py b/src/gradient/types/gpu_droplets/volumes/volume_action.py
similarity index 100%
rename from src/do_gradientai/types/gpu_droplets/volumes/volume_action.py
rename to src/gradient/types/gpu_droplets/volumes/volume_action.py
diff --git a/src/do_gradientai/types/inference/__init__.py b/src/gradient/types/inference/__init__.py
similarity index 100%
rename from src/do_gradientai/types/inference/__init__.py
rename to src/gradient/types/inference/__init__.py
diff --git a/src/do_gradientai/types/inference/api_key_create_params.py b/src/gradient/types/inference/api_key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_create_params.py
rename to src/gradient/types/inference/api_key_create_params.py
diff --git a/src/do_gradientai/types/inference/api_key_create_response.py b/src/gradient/types/inference/api_key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_create_response.py
rename to src/gradient/types/inference/api_key_create_response.py
diff --git a/src/do_gradientai/types/inference/api_key_delete_response.py b/src/gradient/types/inference/api_key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_delete_response.py
rename to src/gradient/types/inference/api_key_delete_response.py
diff --git a/src/do_gradientai/types/inference/api_key_list_params.py b/src/gradient/types/inference/api_key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_list_params.py
rename to src/gradient/types/inference/api_key_list_params.py
diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/gradient/types/inference/api_key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_list_response.py
rename to src/gradient/types/inference/api_key_list_response.py
diff --git a/src/do_gradientai/types/inference/api_key_update_params.py b/src/gradient/types/inference/api_key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_update_params.py
rename to src/gradient/types/inference/api_key_update_params.py
diff --git a/src/do_gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradient/types/inference/api_key_update_regenerate_response.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_update_regenerate_response.py
rename to src/gradient/types/inference/api_key_update_regenerate_response.py
diff --git a/src/do_gradientai/types/inference/api_key_update_response.py b/src/gradient/types/inference/api_key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_key_update_response.py
rename to src/gradient/types/inference/api_key_update_response.py
diff --git a/src/do_gradientai/types/inference/api_model_api_key_info.py b/src/gradient/types/inference/api_model_api_key_info.py
similarity index 100%
rename from src/do_gradientai/types/inference/api_model_api_key_info.py
rename to src/gradient/types/inference/api_model_api_key_info.py
diff --git a/src/do_gradientai/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_create_params.py
rename to src/gradient/types/knowledge_base_create_params.py
diff --git a/src/do_gradientai/types/knowledge_base_create_response.py b/src/gradient/types/knowledge_base_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_create_response.py
rename to src/gradient/types/knowledge_base_create_response.py
diff --git a/src/do_gradientai/types/knowledge_base_delete_response.py b/src/gradient/types/knowledge_base_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_delete_response.py
rename to src/gradient/types/knowledge_base_delete_response.py
diff --git a/src/do_gradientai/types/knowledge_base_list_params.py b/src/gradient/types/knowledge_base_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_list_params.py
rename to src/gradient/types/knowledge_base_list_params.py
diff --git a/src/do_gradientai/types/knowledge_base_list_response.py b/src/gradient/types/knowledge_base_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_list_response.py
rename to src/gradient/types/knowledge_base_list_response.py
diff --git a/src/do_gradientai/types/knowledge_base_retrieve_response.py b/src/gradient/types/knowledge_base_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_retrieve_response.py
rename to src/gradient/types/knowledge_base_retrieve_response.py
diff --git a/src/do_gradientai/types/knowledge_base_update_params.py b/src/gradient/types/knowledge_base_update_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_update_params.py
rename to src/gradient/types/knowledge_base_update_params.py
diff --git a/src/do_gradientai/types/knowledge_base_update_response.py b/src/gradient/types/knowledge_base_update_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_base_update_response.py
rename to src/gradient/types/knowledge_base_update_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/__init__.py
rename to src/gradient/types/knowledge_bases/__init__.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
rename to src/gradient/types/knowledge_bases/api_file_upload_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
rename to src/gradient/types/knowledge_bases/api_file_upload_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradient/types/knowledge_bases/api_indexed_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
rename to src/gradient/types/knowledge_bases/api_indexed_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_indexing_job.py
rename to src/gradient/types/knowledge_bases/api_indexing_job.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
rename to src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradient/types/knowledge_bases/api_spaces_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
rename to src/gradient/types/knowledge_bases/api_spaces_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
rename to src/gradient/types/knowledge_bases/api_spaces_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
rename to src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
rename to src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradient/types/knowledge_bases/aws_data_source_param.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
rename to src/gradient/types/knowledge_bases/aws_data_source_param.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradient/types/knowledge_bases/data_source_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_create_params.py
rename to src/gradient/types/knowledge_bases/data_source_create_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradient/types/knowledge_bases/data_source_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_create_response.py
rename to src/gradient/types/knowledge_bases/data_source_create_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradient/types/knowledge_bases/data_source_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_delete_response.py
rename to src/gradient/types/knowledge_bases/data_source_delete_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradient/types/knowledge_bases/data_source_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_list_params.py
rename to src/gradient/types/knowledge_bases/data_source_list_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradient/types/knowledge_bases/data_source_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/data_source_list_response.py
rename to src/gradient/types/knowledge_bases/data_source_list_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradient/types/knowledge_bases/indexing_job_create_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py
rename to src/gradient/types/knowledge_bases/indexing_job_create_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradient/types/knowledge_bases/indexing_job_create_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py
rename to src/gradient/types/knowledge_bases/indexing_job_create_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradient/types/knowledge_bases/indexing_job_list_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py
rename to src/gradient/types/knowledge_bases/indexing_job_list_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradient/types/knowledge_bases/indexing_job_list_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
rename to src/gradient/types/knowledge_bases/indexing_job_list_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
rename to src/gradient/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
rename to src/gradient/types/knowledge_bases/indexing_job_retrieve_response.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_params.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
rename to src/gradient/types/knowledge_bases/indexing_job_update_cancel_params.py
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
similarity index 100%
rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
rename to src/gradient/types/knowledge_bases/indexing_job_update_cancel_response.py
diff --git a/src/do_gradientai/types/model_list_params.py b/src/gradient/types/model_list_params.py
similarity index 100%
rename from src/do_gradientai/types/model_list_params.py
rename to src/gradient/types/model_list_params.py
diff --git a/src/do_gradientai/types/model_list_response.py b/src/gradient/types/model_list_response.py
similarity index 100%
rename from src/do_gradientai/types/model_list_response.py
rename to src/gradient/types/model_list_response.py
diff --git a/src/do_gradientai/types/models/__init__.py b/src/gradient/types/models/__init__.py
similarity index 100%
rename from src/do_gradientai/types/models/__init__.py
rename to src/gradient/types/models/__init__.py
diff --git a/src/do_gradientai/types/models/providers/__init__.py b/src/gradient/types/models/providers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/__init__.py
rename to src/gradient/types/models/providers/__init__.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_params.py b/src/gradient/types/models/providers/anthropic_create_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_create_params.py
rename to src/gradient/types/models/providers/anthropic_create_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_response.py b/src/gradient/types/models/providers/anthropic_create_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_create_response.py
rename to src/gradient/types/models/providers/anthropic_create_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_delete_response.py b/src/gradient/types/models/providers/anthropic_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_delete_response.py
rename to src/gradient/types/models/providers/anthropic_delete_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py b/src/gradient/types/models/providers/anthropic_list_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
rename to src/gradient/types/models/providers/anthropic_list_agents_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradient/types/models/providers/anthropic_list_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
rename to src/gradient/types/models/providers/anthropic_list_agents_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_params.py b/src/gradient/types/models/providers/anthropic_list_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_params.py
rename to src/gradient/types/models/providers/anthropic_list_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_response.py b/src/gradient/types/models/providers/anthropic_list_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_list_response.py
rename to src/gradient/types/models/providers/anthropic_list_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradient/types/models/providers/anthropic_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
rename to src/gradient/types/models/providers/anthropic_retrieve_response.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_params.py b/src/gradient/types/models/providers/anthropic_update_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_update_params.py
rename to src/gradient/types/models/providers/anthropic_update_params.py
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_response.py b/src/gradient/types/models/providers/anthropic_update_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/anthropic_update_response.py
rename to src/gradient/types/models/providers/anthropic_update_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_create_params.py b/src/gradient/types/models/providers/openai_create_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_create_params.py
rename to src/gradient/types/models/providers/openai_create_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_create_response.py b/src/gradient/types/models/providers/openai_create_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_create_response.py
rename to src/gradient/types/models/providers/openai_create_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_delete_response.py b/src/gradient/types/models/providers/openai_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_delete_response.py
rename to src/gradient/types/models/providers/openai_delete_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_list_params.py b/src/gradient/types/models/providers/openai_list_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_list_params.py
rename to src/gradient/types/models/providers/openai_list_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_list_response.py b/src/gradient/types/models/providers/openai_list_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_list_response.py
rename to src/gradient/types/models/providers/openai_list_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradient/types/models/providers/openai_retrieve_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
rename to src/gradient/types/models/providers/openai_retrieve_agents_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradient/types/models/providers/openai_retrieve_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
rename to src/gradient/types/models/providers/openai_retrieve_agents_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_response.py b/src/gradient/types/models/providers/openai_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_retrieve_response.py
rename to src/gradient/types/models/providers/openai_retrieve_response.py
diff --git a/src/do_gradientai/types/models/providers/openai_update_params.py b/src/gradient/types/models/providers/openai_update_params.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_update_params.py
rename to src/gradient/types/models/providers/openai_update_params.py
diff --git a/src/do_gradientai/types/models/providers/openai_update_response.py b/src/gradient/types/models/providers/openai_update_response.py
similarity index 100%
rename from src/do_gradientai/types/models/providers/openai_update_response.py
rename to src/gradient/types/models/providers/openai_update_response.py
diff --git a/src/do_gradientai/types/region_list_params.py b/src/gradient/types/region_list_params.py
similarity index 100%
rename from src/do_gradientai/types/region_list_params.py
rename to src/gradient/types/region_list_params.py
diff --git a/src/do_gradientai/types/region_list_response.py b/src/gradient/types/region_list_response.py
similarity index 100%
rename from src/do_gradientai/types/region_list_response.py
rename to src/gradient/types/region_list_response.py
diff --git a/src/do_gradientai/types/shared/__init__.py b/src/gradient/types/shared/__init__.py
similarity index 100%
rename from src/do_gradientai/types/shared/__init__.py
rename to src/gradient/types/shared/__init__.py
diff --git a/src/do_gradientai/types/shared/action.py b/src/gradient/types/shared/action.py
similarity index 100%
rename from src/do_gradientai/types/shared/action.py
rename to src/gradient/types/shared/action.py
diff --git a/src/do_gradientai/types/shared/action_link.py b/src/gradient/types/shared/action_link.py
similarity index 100%
rename from src/do_gradientai/types/shared/action_link.py
rename to src/gradient/types/shared/action_link.py
diff --git a/src/do_gradientai/types/shared/api_links.py b/src/gradient/types/shared/api_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/api_links.py
rename to src/gradient/types/shared/api_links.py
diff --git a/src/do_gradientai/types/shared/api_meta.py b/src/gradient/types/shared/api_meta.py
similarity index 100%
rename from src/do_gradientai/types/shared/api_meta.py
rename to src/gradient/types/shared/api_meta.py
diff --git a/src/do_gradientai/types/shared/backward_links.py b/src/gradient/types/shared/backward_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/backward_links.py
rename to src/gradient/types/shared/backward_links.py
diff --git a/src/do_gradientai/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py
similarity index 100%
rename from src/do_gradientai/types/shared/chat_completion_chunk.py
rename to src/gradient/types/shared/chat_completion_chunk.py
diff --git a/src/do_gradientai/types/shared/chat_completion_token_logprob.py b/src/gradient/types/shared/chat_completion_token_logprob.py
similarity index 100%
rename from src/do_gradientai/types/shared/chat_completion_token_logprob.py
rename to src/gradient/types/shared/chat_completion_token_logprob.py
diff --git a/src/do_gradientai/types/shared/completion_usage.py b/src/gradient/types/shared/completion_usage.py
similarity index 100%
rename from src/do_gradientai/types/shared/completion_usage.py
rename to src/gradient/types/shared/completion_usage.py
diff --git a/src/do_gradientai/types/shared/disk_info.py b/src/gradient/types/shared/disk_info.py
similarity index 100%
rename from src/do_gradientai/types/shared/disk_info.py
rename to src/gradient/types/shared/disk_info.py
diff --git a/src/do_gradientai/types/shared/droplet.py b/src/gradient/types/shared/droplet.py
similarity index 100%
rename from src/do_gradientai/types/shared/droplet.py
rename to src/gradient/types/shared/droplet.py
diff --git a/src/do_gradientai/types/shared/droplet_next_backup_window.py b/src/gradient/types/shared/droplet_next_backup_window.py
similarity index 100%
rename from src/do_gradientai/types/shared/droplet_next_backup_window.py
rename to src/gradient/types/shared/droplet_next_backup_window.py
diff --git a/src/do_gradientai/types/shared/firewall_rule_target.py b/src/gradient/types/shared/firewall_rule_target.py
similarity index 100%
rename from src/do_gradientai/types/shared/firewall_rule_target.py
rename to src/gradient/types/shared/firewall_rule_target.py
diff --git a/src/do_gradientai/types/shared/forward_links.py b/src/gradient/types/shared/forward_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/forward_links.py
rename to src/gradient/types/shared/forward_links.py
diff --git a/src/do_gradientai/types/shared/garbage_collection.py b/src/gradient/types/shared/garbage_collection.py
similarity index 100%
rename from src/do_gradientai/types/shared/garbage_collection.py
rename to src/gradient/types/shared/garbage_collection.py
diff --git a/src/do_gradientai/types/shared/gpu_info.py b/src/gradient/types/shared/gpu_info.py
similarity index 100%
rename from src/do_gradientai/types/shared/gpu_info.py
rename to src/gradient/types/shared/gpu_info.py
diff --git a/src/do_gradientai/types/shared/image.py b/src/gradient/types/shared/image.py
similarity index 100%
rename from src/do_gradientai/types/shared/image.py
rename to src/gradient/types/shared/image.py
diff --git a/src/do_gradientai/types/shared/kernel.py b/src/gradient/types/shared/kernel.py
similarity index 100%
rename from src/do_gradientai/types/shared/kernel.py
rename to src/gradient/types/shared/kernel.py
diff --git a/src/do_gradientai/types/shared/meta_properties.py b/src/gradient/types/shared/meta_properties.py
similarity index 100%
rename from src/do_gradientai/types/shared/meta_properties.py
rename to src/gradient/types/shared/meta_properties.py
diff --git a/src/do_gradientai/types/shared/network_v4.py b/src/gradient/types/shared/network_v4.py
similarity index 100%
rename from src/do_gradientai/types/shared/network_v4.py
rename to src/gradient/types/shared/network_v4.py
diff --git a/src/do_gradientai/types/shared/network_v6.py b/src/gradient/types/shared/network_v6.py
similarity index 100%
rename from src/do_gradientai/types/shared/network_v6.py
rename to src/gradient/types/shared/network_v6.py
diff --git a/src/do_gradientai/types/shared/page_links.py b/src/gradient/types/shared/page_links.py
similarity index 100%
rename from src/do_gradientai/types/shared/page_links.py
rename to src/gradient/types/shared/page_links.py
diff --git a/src/do_gradientai/types/shared/region.py b/src/gradient/types/shared/region.py
similarity index 100%
rename from src/do_gradientai/types/shared/region.py
rename to src/gradient/types/shared/region.py
diff --git a/src/do_gradientai/types/shared/size.py b/src/gradient/types/shared/size.py
similarity index 100%
rename from src/do_gradientai/types/shared/size.py
rename to src/gradient/types/shared/size.py
diff --git a/src/do_gradientai/types/shared/snapshots.py b/src/gradient/types/shared/snapshots.py
similarity index 100%
rename from src/do_gradientai/types/shared/snapshots.py
rename to src/gradient/types/shared/snapshots.py
diff --git a/src/do_gradientai/types/shared/subscription.py b/src/gradient/types/shared/subscription.py
similarity index 100%
rename from src/do_gradientai/types/shared/subscription.py
rename to src/gradient/types/shared/subscription.py
diff --git a/src/do_gradientai/types/shared/subscription_tier_base.py b/src/gradient/types/shared/subscription_tier_base.py
similarity index 100%
rename from src/do_gradientai/types/shared/subscription_tier_base.py
rename to src/gradient/types/shared/subscription_tier_base.py
diff --git a/src/do_gradientai/types/shared/vpc_peering.py b/src/gradient/types/shared/vpc_peering.py
similarity index 100%
rename from src/do_gradientai/types/shared/vpc_peering.py
rename to src/gradient/types/shared/vpc_peering.py
diff --git a/src/do_gradientai/types/shared_params/__init__.py b/src/gradient/types/shared_params/__init__.py
similarity index 100%
rename from src/do_gradientai/types/shared_params/__init__.py
rename to src/gradient/types/shared_params/__init__.py
diff --git a/src/do_gradientai/types/shared_params/firewall_rule_target.py b/src/gradient/types/shared_params/firewall_rule_target.py
similarity index 100%
rename from src/do_gradientai/types/shared_params/firewall_rule_target.py
rename to src/gradient/types/shared_params/firewall_rule_target.py
diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py
index d80b5c09..695a374b 100644
--- a/tests/api_resources/agents/chat/test_completions.py
+++ b/tests/api_resources/agents/chat/test_completions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents.chat import CompletionCreateResponse
+from gradient.types.agents.chat import CompletionCreateResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -19,7 +19,7 @@ class TestCompletions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_overload_1(self, client: Gradient) -> None:
         completion = client.agents.chat.completions.create(
             messages=[
                 {
@@ -33,7 +33,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
         completion = client.agents.chat.completions.create(
             messages=[
                 {
@@ -73,7 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.agents.chat.completions.with_raw_response.create(
             messages=[
                 {
@@ -91,7 +91,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.agents.chat.completions.with_streaming_response.create(
             messages=[
                 {
@@ -111,7 +111,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_overload_2(self, client: Gradient) -> None:
         completion_stream = client.agents.chat.completions.create(
             messages=[
                 {
@@ -126,7 +126,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
         completion_stream = client.agents.chat.completions.create(
             messages=[
                 {
@@ -166,7 +166,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.agents.chat.completions.with_raw_response.create(
             messages=[
                 {
@@ -184,7 +184,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.agents.chat.completions.with_streaming_response.create(
             messages=[
                 {
@@ -211,7 +211,7 @@ class TestAsyncCompletions:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         completion = await async_client.agents.chat.completions.create(
             messages=[
                 {
@@ -225,7 +225,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
    
     @parametrize
-    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         completion = await async_client.agents.chat.completions.create(
             messages=[
                 {
@@ -265,7 +265,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.chat.completions.with_raw_response.create(
             messages=[
                 {
@@ -283,7 +283,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
test_raw_response_create_overload_1(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( messages=[ { @@ -303,7 +303,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.agents.chat.completions.create( messages=[ { @@ -318,7 +318,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.agents.chat.completions.create( messages=[ { @@ -358,7 +358,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( messages=[ { @@ -376,7 +376,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( messages=[ { diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py index aff153a6..a8ca5724 100644 --- a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py +++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics.anthropic import ( +from gradient.types.agents.evaluation_metrics.anthropic import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, @@ -26,13 +26,13 @@ class TestKeys: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.create( api_key='"sk-ant-12345678901234567890123456789012"', name='"Production Key"', @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, 
client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() assert response.is_closed is True @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.retrieve( "api_key_uuid", ) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -97,7 +97,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( "", @@ -105,7 +105,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -113,7 +113,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -124,7 +124,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -136,7 +136,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> 
None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -150,7 +150,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( path_api_key_uuid="", @@ -158,13 +158,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list( page=0, per_page=0, @@ -173,7 +173,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() assert response.is_closed is True @@ -183,7 +183,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,7 +195,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.delete( "api_key_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( "api_key_uuid", ) @@ -215,7 +215,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -229,7 +229,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): 
client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( "", @@ -237,7 +237,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents(self, client: GradientAI) -> None: + def test_method_list_agents(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -245,7 +245,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + def test_method_list_agents_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -255,7 +255,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_agents(self, client: GradientAI) -> None: + def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -267,7 +267,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_agents(self, client: GradientAI) -> None: + def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -281,7 +281,7 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_agents(self, client: GradientAI) -> None: + def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( uuid="", @@ -295,13 +295,13 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.create( api_key='"sk-ant-12345678901234567890123456789012"', name='"Production Key"', @@ -310,7 +310,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() assert response.is_closed is True @@ -320,7 +320,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def 
test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -332,7 +332,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve( "api_key_uuid", ) @@ -340,7 +340,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -352,7 +352,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -366,7 +366,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( "", @@ -374,7 +374,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -382,7 +382,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -393,7 +393,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -405,7 +405,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def 
test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -419,7 +419,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( path_api_key_uuid="", @@ -427,13 +427,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list( page=0, per_page=0, @@ -442,7 +442,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() assert response.is_closed is True @@ -452,7 +452,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -464,7 +464,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.delete( "api_key_uuid", ) @@ -472,7 +472,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( "api_key_uuid", ) @@ -484,7 +484,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def 
test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -498,7 +498,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( "", @@ -506,7 +506,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -514,7 +514,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -524,7 +524,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize - async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -536,7 +536,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -550,7 +550,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( uuid="", diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py index 08404acc..5a22b1bc 100644 --- a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py +++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from 
do_gradientai.types.agents.evaluation_metrics.openai import ( +from gradient.types.agents.evaluation_metrics.openai import ( KeyListResponse, KeyCreateResponse, KeyDeleteResponse, @@ -26,13 +26,13 @@ class TestKeys: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.create( api_key='"sk-proj--123456789098765432123456789"', name='"Production Key"', @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.create() assert response.is_closed is True @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.retrieve( "api_key_uuid", ) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -97,7 +97,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( "", @@ -105,7 +105,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -113,7 +113,7 @@ def test_method_update(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -124,7 +124,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -136,7 +136,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -150,7 +150,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.agents.evaluation_metrics.openai.keys.with_raw_response.update( path_api_key_uuid="", @@ -158,13 +158,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list( page=0, per_page=0, @@ -173,7 +173,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list() assert response.is_closed is True @@ -183,7 +183,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,7 +195,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.delete( "api_key_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def 
test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( "api_key_uuid", ) @@ -215,7 +215,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -229,7 +229,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( "", @@ -237,7 +237,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents(self, client: GradientAI) -> None: + def test_method_list_agents(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -245,7 +245,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + def test_method_list_agents_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -255,7 +255,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_agents(self, client: GradientAI) -> None: + def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -267,7 +267,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_agents(self, client: GradientAI) -> None: + def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -281,7 +281,7 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_agents(self, client: GradientAI) -> None: + def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( uuid="", @@ -295,13 +295,13 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: 
AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.create( api_key='"sk-proj--123456789098765432123456789"', name='"Production Key"', @@ -310,7 +310,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.create() assert response.is_closed is True @@ -320,7 +320,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -332,7 +332,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.retrieve( "api_key_uuid", ) @@ -340,7 +340,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( "api_key_uuid", ) @@ -352,7 +352,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -366,7 +366,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( "", @@ -374,7 +374,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -382,7 +382,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: 
AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -393,7 +393,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -405,7 +405,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -419,7 +419,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update( path_api_key_uuid="", @@ -427,13 +427,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list( page=0, per_page=0, @@ -442,7 +442,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list() assert response.is_closed is True @@ -452,7 +452,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -464,7 +464,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, 
async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.delete( "api_key_uuid", ) @@ -472,7 +472,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( "api_key_uuid", ) @@ -484,7 +484,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -498,7 +498,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( "", @@ -506,7 +506,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -514,7 +514,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -524,7 +524,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize - async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -536,7 +536,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -550,7 +550,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_agents(self, async_client: 
AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( uuid="", diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py index 27ab4a27..624e5288 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_models.py +++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics import ModelListResponse +from gradient.types.agents.evaluation_metrics import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,13 +19,13 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: model = client.agents.evaluation_metrics.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: model = client.agents.evaluation_metrics.models.list( page=0, per_page=0, @@ -36,7 +36,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.models.with_raw_response.list() assert response.is_closed is True @@ -46,7 +46,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -64,13 +64,13 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: model = await async_client.agents.evaluation_metrics.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: model = await async_client.agents.evaluation_metrics.models.list( page=0, per_page=0, @@ -81,7 +81,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.models.with_raw_response.list() assert response.is_closed is True @@ -91,7 +91,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> 
None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index 2728393e..608406bf 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics import ( +from gradient.types.agents.evaluation_metrics import ( WorkspaceListResponse, WorkspaceCreateResponse, WorkspaceDeleteResponse, @@ -26,13 +26,13 @@ class TestWorkspaces: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.create() assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( agent_uuids=["example string"], description='"example string"', @@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.create() assert response.is_closed is True @@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.retrieve( "workspace_uuid", ) @@ -72,7 +72,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( "workspace_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve( "workspace_uuid", ) as response: @@ -98,7 +98,7 @@ def 
test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( "", @@ -106,7 +106,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -114,7 +114,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', description='"example string"', @@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -137,7 +137,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -151,7 +151,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.with_raw_response.update( path_workspace_uuid="", @@ -159,13 +159,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.list() assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.list() assert response.is_closed is True @@ -175,7 +175,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" @@ -187,7 +187,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.delete( "workspace_uuid", ) @@ -195,7 +195,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.delete( "workspace_uuid", ) @@ -207,7 +207,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete( "workspace_uuid", ) as response: @@ -221,7 +221,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.with_raw_response.delete( "", @@ -229,7 +229,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_evaluation_test_cases(self, client: GradientAI) -> None: + def test_method_list_evaluation_test_cases(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases( "workspace_uuid", ) @@ -237,7 +237,7 @@ def test_method_list_evaluation_test_cases(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_evaluation_test_cases(self, client: GradientAI) -> None: + def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( "workspace_uuid", ) @@ -249,7 +249,7 @@ def test_raw_response_list_evaluation_test_cases(self, client: GradientAI) -> No @pytest.mark.skip() @parametrize - def test_streaming_response_list_evaluation_test_cases(self, client: GradientAI) -> None: + def test_streaming_response_list_evaluation_test_cases(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases( "workspace_uuid", ) as response: @@ -263,7 +263,7 @@ def test_streaming_response_list_evaluation_test_cases(self, client: GradientAI) @pytest.mark.skip() @parametrize - def test_path_params_list_evaluation_test_cases(self, client: GradientAI) -> None: + def test_path_params_list_evaluation_test_cases(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( "", @@ -277,13 +277,13 @@ class TestAsyncWorkspaces: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create() 
assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create( agent_uuids=["example string"], description='"example string"', @@ -293,7 +293,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.create() assert response.is_closed is True @@ -303,7 +303,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -315,7 +315,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve( "workspace_uuid", ) @@ -323,7 +323,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( "workspace_uuid", ) @@ -335,7 +335,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve( "workspace_uuid", ) as response: @@ -349,7 +349,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve( "", @@ -357,7 +357,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -365,7 
+365,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', description='"example string"', @@ -376,7 +376,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -402,7 +402,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( path_workspace_uuid="", @@ -410,13 +410,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.list() assert_matches_type(WorkspaceListResponse, workspace, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list() assert response.is_closed is True @@ -426,7 +426,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -438,7 +438,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.delete( 
"workspace_uuid", ) @@ -446,7 +446,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete( "workspace_uuid", ) @@ -458,7 +458,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete( "workspace_uuid", ) as response: @@ -472,7 +472,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete( "", @@ -480,7 +480,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases( "workspace_uuid", ) @@ -488,7 +488,7 @@ async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradie @pytest.mark.skip() @parametrize - async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( "workspace_uuid", ) @@ -500,7 +500,7 @@ async def test_raw_response_list_evaluation_test_cases(self, async_client: Async @pytest.mark.skip() @parametrize - async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases( "workspace_uuid", ) as response: @@ -514,7 +514,7 @@ async def test_streaming_response_list_evaluation_test_cases(self, async_client: @pytest.mark.skip() @parametrize - async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases( "", diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 37d39018..b70f9c58 100644 --- 
a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics.workspaces import ( +from gradient.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) @@ -22,7 +22,7 @@ class TestAgents: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -30,7 +30,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, @@ -41,7 +41,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -53,7 +53,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -67,7 +67,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list(self, client: GradientAI) -> None: + def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( workspace_uuid="", @@ -75,7 +75,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_move(self, client: GradientAI) -> None: + def test_method_move(self, client: Gradient) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -83,7 +83,7 @@ def test_method_move(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_move_with_all_params(self, client: GradientAI) -> None: + def test_method_move_with_all_params(self, client: Gradient) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuids=["example string"], @@ -93,7 +93,7 @@ def test_method_move_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_move(self, client: GradientAI) -> None: + def test_raw_response_move(self, client: Gradient) -> None: response = 
client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -105,7 +105,7 @@ def test_raw_response_move(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_move(self, client: GradientAI) -> None: + def test_streaming_response_move(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -119,7 +119,7 @@ def test_streaming_response_move(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_move(self, client: GradientAI) -> None: + def test_path_params_move(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( path_workspace_uuid="", @@ -133,7 +133,7 @@ class TestAsyncAgents: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -141,7 +141,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, @@ -152,7 +152,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -164,7 +164,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -178,7 +178,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( workspace_uuid="", @@ -186,7 +186,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_move(self, async_client: AsyncGradientAI) -> None: + async def test_method_move(self, async_client: AsyncGradient) -> None: agent = await 
async_client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -194,7 +194,7 @@ async def test_method_move(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_move_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuids=["example string"], @@ -204,7 +204,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_move(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -216,7 +216,7 @@ async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_move(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -230,7 +230,7 @@ async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_move(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_move(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"): await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( path_workspace_uuid="", diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 1e5275fe..4b80fc54 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradient.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -25,7 +25,7 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: api_key = client.agents.api_keys.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: api_key = client.agents.api_keys.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_agent_uuid='"12345678-1234-1234-1234-123456789012"', @@ -43,7 +43,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: 
Gradient) -> None: response = client.agents.api_keys.with_raw_response.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -55,7 +55,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.api_keys.with_streaming_response.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_create(self, client: GradientAI) -> None: + def test_path_params_create(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.create( path_agent_uuid="", @@ -77,7 +77,7 @@ def test_path_params_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: api_key = client.agents.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -86,7 +86,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: api_key = client.agents.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -98,7 +98,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.api_keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -111,7 +111,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.api_keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -126,7 +126,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -141,7 +141,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: api_key = client.agents.api_keys.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -149,7 +149,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def 
test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: api_key = client.agents.api_keys.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -159,7 +159,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.api_keys.with_raw_response.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -171,7 +171,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.api_keys.with_streaming_response.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -185,7 +185,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list(self, client: GradientAI) -> None: + def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.list( agent_uuid="", @@ -193,7 +193,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: api_key = client.agents.api_keys.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -202,7 +202,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.api_keys.with_raw_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -215,7 +215,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.api_keys.with_streaming_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -230,7 +230,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -245,7 +245,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_regenerate(self, client: GradientAI) -> None: + def test_method_regenerate(self, client: Gradient) -> None: api_key = client.agents.api_keys.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -254,7 +254,7 @@ def test_method_regenerate(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize - def test_raw_response_regenerate(self, client: GradientAI) -> None: + def test_raw_response_regenerate(self, client: Gradient) -> None: response = client.agents.api_keys.with_raw_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -267,7 +267,7 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_regenerate(self, client: GradientAI) -> None: + def test_streaming_response_regenerate(self, client: Gradient) -> None: with client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -282,7 +282,7 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_regenerate(self, client: GradientAI) -> None: + def test_path_params_regenerate(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -303,7 +303,7 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -311,7 +311,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_agent_uuid='"12345678-1234-1234-1234-123456789012"', @@ -321,7 +321,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -333,7 +333,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.api_keys.with_streaming_response.create( path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -347,7 +347,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_create(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid="", @@ -355,7 +355,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: 
@pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -364,7 +364,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -376,7 +376,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -389,7 +389,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.api_keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -404,7 +404,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -419,7 +419,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -427,7 +427,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -437,7 +437,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.api_keys.with_raw_response.list( 
agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -449,7 +449,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.api_keys.with_streaming_response.list( agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -463,7 +463,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.list( agent_uuid="", @@ -471,7 +471,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -480,7 +480,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -493,7 +493,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.api_keys.with_streaming_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -508,7 +508,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -523,7 +523,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_method_regenerate(self, async_client: AsyncGradient) -> None: api_key = await async_client.agents.api_keys.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -532,7 +532,7 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) 
-> None: + async def test_raw_response_regenerate(self, async_client: AsyncGradient) -> None: response = await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -545,7 +545,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_regenerate(self, async_client: AsyncGradient) -> None: async with async_client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -560,7 +560,7 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_regenerate(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index 56edd598..f60c4720 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradient.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) @@ -22,13 +22,13 @@ class TestEvaluationDatasets: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: evaluation_dataset = client.agents.evaluation_datasets.create() assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: evaluation_dataset = client.agents.evaluation_datasets.create( file_upload_dataset={ "original_file_name": '"example name"', @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_datasets.with_raw_response.create() assert response.is_closed is True @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_datasets.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def 
test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + def test_method_create_file_upload_presigned_urls(self, client: Gradient) -> None: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls() assert_matches_type( EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] @@ -71,7 +71,7 @@ def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_method_create_file_upload_presigned_urls_with_all_params(self, client: GradientAI) -> None: + def test_method_create_file_upload_presigned_urls_with_all_params(self, client: Gradient) -> None: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { @@ -86,7 +86,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: @pytest.mark.skip() @parametrize - def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + def test_raw_response_create_file_upload_presigned_urls(self, client: Gradient) -> None: response = client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() assert response.is_closed is True @@ -98,7 +98,7 @@ def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI @pytest.mark.skip() @parametrize - def test_streaming_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + def test_streaming_response_create_file_upload_presigned_urls(self, client: Gradient) -> None: with client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -118,13 +118,13 @@ class TestAsyncEvaluationDatasets: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create() assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ "original_file_name": '"example name"', @@ -137,7 +137,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_datasets.with_raw_response.create() assert response.is_closed is True @@ -147,7 +147,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_datasets.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -159,7 +159,7 @@ async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls() assert_matches_type( EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] @@ -167,9 +167,7 @@ async def test_method_create_file_upload_presigned_urls(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_method_create_file_upload_presigned_urls_with_all_params( - self, async_client: AsyncGradientAI - ) -> None: + async def test_method_create_file_upload_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { @@ -184,7 +182,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params( @pytest.mark.skip() @parametrize - async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() assert response.is_closed is True @@ -196,7 +194,7 @@ async def test_raw_response_create_file_upload_presigned_urls(self, async_client @pytest.mark.skip() @parametrize - async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None: async with ( async_client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() ) as response: diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py index 303d85d6..612f4228 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradient.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) @@ -22,13 +22,13 @@ class TestEvaluationMetrics: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: evaluation_metric = client.agents.evaluation_metrics.list() assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.with_raw_response.list() assert response.is_closed is True @@ -38,7 +38,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.with_streaming_response.list() as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -50,13 +50,13 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_regions(self, client: GradientAI) -> None: + def test_method_list_regions(self, client: Gradient) -> None: evaluation_metric = client.agents.evaluation_metrics.list_regions() assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_regions_with_all_params(self, client: GradientAI) -> None: + def test_method_list_regions_with_all_params(self, client: Gradient) -> None: evaluation_metric = client.agents.evaluation_metrics.list_regions( serves_batch=True, serves_inference=True, @@ -65,7 +65,7 @@ def test_method_list_regions_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_regions(self, client: GradientAI) -> None: + def test_raw_response_list_regions(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.with_raw_response.list_regions() assert response.is_closed is True @@ -75,7 +75,7 @@ def test_raw_response_list_regions(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_regions(self, client: GradientAI) -> None: + def test_streaming_response_list_regions(self, client: Gradient) -> None: with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -93,13 +93,13 @@ class TestAsyncEvaluationMetrics: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: evaluation_metric = await async_client.agents.evaluation_metrics.list() assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.with_raw_response.list() assert response.is_closed is True @@ -109,7 +109,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -121,13 +121,13 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_list_regions(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_regions(self, async_client: AsyncGradient) -> None: evaluation_metric = await async_client.agents.evaluation_metrics.list_regions() assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_regions_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_regions_with_all_params(self, async_client: 
AsyncGradient) -> None: evaluation_metric = await async_client.agents.evaluation_metrics.list_regions( serves_batch=True, serves_inference=True, @@ -136,7 +136,7 @@ async def test_method_list_regions_with_all_params(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_raw_response_list_regions(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_regions(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions() assert response.is_closed is True @@ -146,7 +146,7 @@ async def test_raw_response_list_regions(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_regions(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_regions(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 9d443f16..be842cbc 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradient.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, @@ -24,13 +24,13 @@ class TestEvaluationRuns: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.create() assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.create( agent_uuids=["example string"], run_name="Evaluation Run Name", @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_runs.with_raw_response.create() assert response.is_closed is True @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_runs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.retrieve( "evaluation_run_uuid", ) @@ -70,7 +70,7 @@ def test_method_retrieve(self, client: GradientAI) -> 
None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_runs.with_raw_response.retrieve( "evaluation_run_uuid", ) @@ -82,7 +82,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_runs.with_streaming_response.retrieve( "evaluation_run_uuid", ) as response: @@ -96,7 +96,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.retrieve( "", @@ -104,7 +104,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_results(self, client: GradientAI) -> None: + def test_method_list_results(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.list_results( evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -112,7 +112,7 @@ def test_method_list_results(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_results_with_all_params(self, client: GradientAI) -> None: + def test_method_list_results_with_all_params(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.list_results( evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -122,7 +122,7 @@ def test_method_list_results_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_results(self, client: GradientAI) -> None: + def test_raw_response_list_results(self, client: Gradient) -> None: response = client.agents.evaluation_runs.with_raw_response.list_results( evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -134,7 +134,7 @@ def test_raw_response_list_results(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_results(self, client: GradientAI) -> None: + def test_streaming_response_list_results(self, client: Gradient) -> None: with client.agents.evaluation_runs.with_streaming_response.list_results( evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -148,7 +148,7 @@ def test_streaming_response_list_results(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_results(self, client: GradientAI) -> None: + def test_path_params_list_results(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.list_results( evaluation_run_uuid="", @@ -156,7 +156,7 @@ def test_path_params_list_results(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve_results(self, client: GradientAI) -> None: + def test_method_retrieve_results(self, client: Gradient) -> None: evaluation_run = client.agents.evaluation_runs.retrieve_results( prompt_id=1, evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', 
@@ -165,7 +165,7 @@ def test_method_retrieve_results(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve_results(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve_results(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.retrieve_results(
             prompt_id=1,
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -178,7 +178,7 @@ def test_raw_response_retrieve_results(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve_results(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve_results(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.retrieve_results(
             prompt_id=1,
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -193,7 +193,7 @@ def test_streaming_response_retrieve_results(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve_results(self, client: GradientAI) -> None:
+    def test_path_params_retrieve_results(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             client.agents.evaluation_runs.with_raw_response.retrieve_results(
                 prompt_id=1,
@@ -208,13 +208,13 @@ class TestAsyncEvaluationRuns:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.create()
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.create(
             agent_uuids=["example string"],
             run_name="Evaluation Run Name",
@@ -224,7 +224,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.create()

         assert response.is_closed is True
@@ -234,7 +234,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -246,7 +246,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve(
             "evaluation_run_uuid",
         )
@@ -254,7 +254,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve(
             "evaluation_run_uuid",
         )
@@ -266,7 +266,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve(
             "evaluation_run_uuid",
         ) as response:
@@ -280,7 +280,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             await async_client.agents.evaluation_runs.with_raw_response.retrieve(
                 "",
@@ -288,7 +288,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_results(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.list_results(
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -296,7 +296,7 @@ async def test_method_list_results(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_results_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_results_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.list_results(
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             page=0,
@@ -306,7 +306,7 @@ async def test_method_list_results_with_all_params(self, async_client: AsyncGrad

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_results(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.list_results(
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -318,7 +318,7 @@ async def test_raw_response_list_results(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_results(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.list_results(
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -332,7 +332,7 @@ async def test_streaming_response_list_results(self, async_client: AsyncGradient

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_list_results(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             await async_client.agents.evaluation_runs.with_raw_response.list_results(
                 evaluation_run_uuid="",
@@ -340,7 +340,7 @@ async def test_path_params_list_results(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve_results(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve_results(
             prompt_id=1,
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -349,7 +349,7 @@ async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve_results(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
             prompt_id=1,
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -362,7 +362,7 @@ async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve_results(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results(
             prompt_id=1,
             evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -377,7 +377,7 @@ async def test_streaming_response_retrieve_results(self, async_client: AsyncGrad

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve_results(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
             await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
                 prompt_id=1,
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index ae986abc..2860aa2c 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     EvaluationTestCaseListResponse,
     EvaluationTestCaseCreateResponse,
     EvaluationTestCaseUpdateResponse,
@@ -25,13 +25,13 @@ class TestEvaluationTestCases:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: GradientAI) -> None:
+    def test_method_create(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.create()
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.create(
             dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             description='"example string"',
@@ -49,7 +49,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: GradientAI) -> None:
+    def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.create()

         assert response.is_closed is True
@@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: GradientAI) -> None:
+    def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -71,7 +71,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -79,7 +79,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:
+    def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             evaluation_test_case_version=0,
@@ -88,7 +88,7 @@ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -100,7 +100,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -114,7 +114,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
             client.agents.evaluation_test_cases.with_raw_response.retrieve(
                 test_case_uuid="",
@@ -122,7 +122,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: GradientAI) -> None:
+    def test_method_update(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -130,7 +130,7 @@ def test_method_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+    def test_method_update_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -149,7 +149,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: GradientAI) -> None:
+    def test_raw_response_update(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -161,7 +161,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: GradientAI) -> None:
+    def test_streaming_response_update(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -175,7 +175,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: GradientAI) -> None:
+    def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
             client.agents.evaluation_test_cases.with_raw_response.update(
                 path_test_case_uuid="",
@@ -183,13 +183,13 @@ def test_path_params_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.list()
         assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.list()

         assert response.is_closed is True
@@ -199,7 +199,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -211,7 +211,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_evaluation_runs(self, client: GradientAI) -> None:
+    def test_method_list_evaluation_runs(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -219,7 +219,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_evaluation_runs_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             evaluation_test_case_version=0,
@@ -228,7 +228,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None:
+    def test_raw_response_list_evaluation_runs(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -240,7 +240,7 @@ def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None:
+    def test_streaming_response_list_evaluation_runs(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -254,7 +254,7 @@ def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> No

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_list_evaluation_runs(self, client: GradientAI) -> None:
+    def test_path_params_list_evaluation_runs(self, client: Gradient) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
         ):
@@ -270,13 +270,13 @@ class TestAsyncEvaluationTestCases:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.create()
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.create(
             dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             description='"example string"',
@@ -294,7 +294,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_test_cases.with_raw_response.create()

         assert response.is_closed is True
@@ -304,7 +304,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_test_cases.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -316,7 +316,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -324,7 +324,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             evaluation_test_case_version=0,
@@ -333,7 +333,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -345,7 +345,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve(
             test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -359,7 +359,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
             await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
                 test_case_uuid="",
@@ -367,7 +367,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -375,7 +375,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -394,7 +394,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_test_cases.with_raw_response.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -406,7 +406,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_test_cases.with_streaming_response.update(
             path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -420,7 +420,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
             await async_client.agents.evaluation_test_cases.with_raw_response.update(
                 path_test_case_uuid="",
@@ -428,13 +428,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.list()
         assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_test_cases.with_raw_response.list()

         assert response.is_closed is True
@@ -444,7 +444,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_test_cases.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -456,7 +456,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -464,7 +464,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             evaluation_test_case_version=0,
@@ -473,7 +473,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -485,7 +485,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
             evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -499,7 +499,7 @@ async def test_streaming_response_list_evaluation_runs(self, async_client: Async

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradient) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
         ):
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
index 624446e0..0ba54432 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/agents/test_functions.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     FunctionCreateResponse,
     FunctionDeleteResponse,
     FunctionUpdateResponse,
@@ -23,7 +23,7 @@ class TestFunctions:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: GradientAI) -> None:
+    def test_method_create(self, client: Gradient) -> None:
         function = client.agents.functions.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -31,7 +31,7 @@ def test_method_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params(self, client: Gradient) -> None:
         function = client.agents.functions.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
@@ -46,7 +46,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: GradientAI) -> None:
+    def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.functions.with_raw_response.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -58,7 +58,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: GradientAI) -> None:
+    def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.functions.with_streaming_response.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -72,7 +72,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_create(self, client: GradientAI) -> None:
+    def test_path_params_create(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
             client.agents.functions.with_raw_response.create(
                 path_agent_uuid="",
@@ -80,7 +80,7 @@ def test_path_params_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: GradientAI) -> None:
+    def test_method_update(self, client: Gradient) -> None:
         function = client.agents.functions.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -89,7 +89,7 @@ def test_method_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+    def test_method_update_with_all_params(self, client: Gradient) -> None:
         function = client.agents.functions.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -106,7 +106,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: GradientAI) -> None:
+    def test_raw_response_update(self, client: Gradient) -> None:
         response = client.agents.functions.with_raw_response.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -119,7 +119,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: GradientAI) -> None:
+    def test_streaming_response_update(self, client: Gradient) -> None:
         with client.agents.functions.with_streaming_response.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -134,7 +134,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: GradientAI) -> None:
+    def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
             client.agents.functions.with_raw_response.update(
                 path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -149,7 +149,7 @@ def test_path_params_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         function = client.agents.functions.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -158,7 +158,7 @@ def test_method_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.functions.with_raw_response.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -171,7 +171,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.functions.with_streaming_response.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -186,7 +186,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.functions.with_raw_response.delete(
                 function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -207,7 +207,7 @@ class TestAsyncFunctions:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
         function = await async_client.agents.functions.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -215,7 +215,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         function = await async_client.agents.functions.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             body_agent_uuid='"12345678-1234-1234-1234-123456789012"',
@@ -230,7 +230,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.functions.with_raw_response.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -242,7 +242,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.functions.with_streaming_response.create(
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -256,7 +256,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_create(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
             await async_client.agents.functions.with_raw_response.create(
                 path_agent_uuid="",
@@ -264,7 +264,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update(self, async_client: AsyncGradient) -> None:
         function = await async_client.agents.functions.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -273,7 +273,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         function = await async_client.agents.functions.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -290,7 +290,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.functions.with_raw_response.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -303,7 +303,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.functions.with_streaming_response.update(
             path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -318,7 +318,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
             await async_client.agents.functions.with_raw_response.update(
                 path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -333,7 +333,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         function = await async_client.agents.functions.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -342,7 +342,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.functions.with_raw_response.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -355,7 +355,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.functions.with_streaming_response.delete(
             function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -370,7 +370,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.functions.with_raw_response.delete(
                 function_uuid='"123e4567-e89b-12d3-a456-426614174000"',
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index 7ac99316..dd35e5f4 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from gradient.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse

 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

@@ -19,7 +19,7 @@ class TestKnowledgeBases:

     @pytest.mark.skip()
     @parametrize
-    def test_method_attach(self, client: GradientAI) -> None:
+    def test_method_attach(self, client: Gradient) -> None:
         knowledge_base = client.agents.knowledge_bases.attach(
             "agent_uuid",
         )
@@ -27,7 +27,7 @@ def test_method_attach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_attach(self, client: GradientAI) -> None:
+    def test_raw_response_attach(self, client: Gradient) -> None:
         response = client.agents.knowledge_bases.with_raw_response.attach(
             "agent_uuid",
         )
@@ -39,7 +39,7 @@ def test_raw_response_attach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_attach(self, client: GradientAI) -> None:
+    def test_streaming_response_attach(self, client: Gradient) -> None:
         with client.agents.knowledge_bases.with_streaming_response.attach(
             "agent_uuid",
         ) as response:
@@ -53,7 +53,7 @@ def test_streaming_response_attach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_attach(self, client: GradientAI) -> None:
+    def test_path_params_attach(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.attach(
                 "",
@@ -61,7 +61,7 @@ def test_path_params_attach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_attach_single(self, client: GradientAI) -> None:
+    def test_method_attach_single(self, client: Gradient) -> None:
         knowledge_base = client.agents.knowledge_bases.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -70,7 +70,7 @@ def test_method_attach_single(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_attach_single(self, client: GradientAI) -> None:
+    def test_raw_response_attach_single(self, client: Gradient) -> None:
         response = client.agents.knowledge_bases.with_raw_response.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -83,7 +83,7 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_attach_single(self, client: GradientAI) -> None:
+    def test_streaming_response_attach_single(self, client: Gradient) -> None:
         with client.agents.knowledge_bases.with_streaming_response.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -98,7 +98,7 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_attach_single(self, client: GradientAI) -> None:
+    def test_path_params_attach_single(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.attach_single(
                 knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -113,7 +113,7 @@ def test_path_params_attach_single(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_detach(self, client: GradientAI) -> None:
+    def test_method_detach(self, client: Gradient) -> None:
         knowledge_base = client.agents.knowledge_bases.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -122,7 +122,7 @@ def test_method_detach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_detach(self, client: GradientAI) -> None:
+    def test_raw_response_detach(self, client: Gradient) -> None:
         response = client.agents.knowledge_bases.with_raw_response.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -135,7 +135,7 @@ def test_raw_response_detach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_detach(self, client: GradientAI) -> None:
+    def test_streaming_response_detach(self, client: Gradient) -> None:
         with client.agents.knowledge_bases.with_streaming_response.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -150,7 +150,7 @@ def test_streaming_response_detach(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_detach(self, client: GradientAI) -> None:
+    def test_path_params_detach(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             client.agents.knowledge_bases.with_raw_response.detach(
                 knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -171,7 +171,7 @@ class TestAsyncKnowledgeBases:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_attach(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_attach(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.attach(
             "agent_uuid",
         )
@@ -179,7 +179,7 @@ async def test_method_attach(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_attach(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.attach(
             "agent_uuid",
         )
@@ -191,7 +191,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_attach(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.attach(
             "agent_uuid",
         ) as response:
@@ -205,7 +205,7 @@ async def test_streaming_response_attach(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_attach(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.attach(
                 "",
@@ -213,7 +213,7 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_attach_single(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -222,7 +222,7 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_attach_single(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -235,7 +235,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_attach_single(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.attach_single(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -250,7 +250,7 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_attach_single(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.attach_single(
                 knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -265,7 +265,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_detach(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_detach(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -274,7 +274,7 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_detach(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -287,7 +287,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_detach(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.detach(
             knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -302,7 +302,7 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_detach(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
             await async_client.agents.knowledge_bases.with_raw_response.detach(
                 knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index 256a4757..294fa853 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.agents import (
+from gradient.types.agents import (
     RouteAddResponse,
     RouteViewResponse,
     RouteDeleteResponse,
@@ -24,7 +24,7 @@ class TestRoutes:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: GradientAI) -> None:
+    def test_method_update(self, client: Gradient) -> None:
         route = client.agents.routes.update(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -33,7 +33,7 @@ def test_method_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+    def test_method_update_with_all_params(self, client: Gradient) -> None:
         route = client.agents.routes.update(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -47,7 +47,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: GradientAI) -> None:
+    def test_raw_response_update(self, client: Gradient) -> None:
         response = client.agents.routes.with_raw_response.update(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -60,7 +60,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: GradientAI) -> None:
+    def test_streaming_response_update(self, client: Gradient) -> None:
         with client.agents.routes.with_streaming_response.update(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -75,7 +75,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: GradientAI) -> None:
+    def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
         ):
@@ -92,7 +92,7 @@ def test_path_params_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         route = client.agents.routes.delete(
             child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -101,7 +101,7 @@ def test_method_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.routes.with_raw_response.delete(
             child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -114,7 +114,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.routes.with_streaming_response.delete(
             child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -129,7 +129,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
             client.agents.routes.with_raw_response.delete(
                 child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -144,7 +144,7 @@ def test_path_params_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_add(self, client: GradientAI) -> None:
+    def test_method_add(self, client: Gradient) -> None:
         route = client.agents.routes.add(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -153,7 +153,7 @@ def test_method_add(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_add_with_all_params(self, client: GradientAI) -> None:
+    def test_method_add_with_all_params(self, client: Gradient) -> None:
         route = client.agents.routes.add(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -166,7 +166,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_add(self, client: GradientAI) -> None:
+    def test_raw_response_add(self, client: Gradient) -> None:
         response = client.agents.routes.with_raw_response.add(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -179,7 +179,7 @@ def test_raw_response_add(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_add(self, client: GradientAI) -> None:
+    def test_streaming_response_add(self, client: Gradient) -> None:
         with client.agents.routes.with_streaming_response.add(
             path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
@@ -194,7 +194,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_add(self, client: GradientAI) -> None:
+    def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
         ):
@@ -211,7 +211,7 @@ def test_path_params_add(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_view(self, client: GradientAI) -> None:
+    def test_method_view(self, client: Gradient) -> None:
         route = client.agents.routes.view(
             "uuid",
         )
@@ -219,7 +219,7 @@ def test_method_view(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_view(self, client: GradientAI) -> None:
+    def test_raw_response_view(self, client: Gradient) -> None:
         response = client.agents.routes.with_raw_response.view(
             "uuid",
         )
@@ -231,7 +231,7 @@ def test_raw_response_view(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_view(self, client: GradientAI) -> None:
+    def test_streaming_response_view(self, client: Gradient) -> None:
         with client.agents.routes.with_streaming_response.view(
"uuid", ) as response: @@ -245,7 +245,7 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_view(self, client: GradientAI) -> None: + def test_path_params_view(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.routes.with_raw_response.view( "", @@ -259,7 +259,7 @@ class TestAsyncRoutes: @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.update( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -268,7 +268,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.update( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -282,7 +282,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -295,7 +295,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.update( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -310,7 +310,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): @@ -327,7 +327,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.delete( child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -336,7 +336,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.delete( 
child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -349,7 +349,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.delete( child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -364,7 +364,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -379,7 +379,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: + async def test_method_add(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.add( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -388,7 +388,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.add( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -401,7 +401,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_add(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -414,7 +414,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_add(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.add( path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -429,7 +429,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No @pytest.mark.skip() @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_add(self, async_client: AsyncGradient) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): @@ -446,7 +446,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> 
None: @pytest.mark.skip() @parametrize - async def test_method_view(self, async_client: AsyncGradientAI) -> None: + async def test_method_view(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.view( "uuid", ) @@ -454,7 +454,7 @@ async def test_method_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_view(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.view( "uuid", ) @@ -466,7 +466,7 @@ async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_view(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.view( "uuid", ) as response: @@ -480,7 +480,7 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_view(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.routes.with_raw_response.view( "", diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 158856ed..4b45edf7 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,12 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( - VersionListResponse, - VersionUpdateResponse, -) +from gradient.types.agents import VersionListResponse, VersionUpdateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -22,7 +19,7 @@ class TestVersions: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: version = client.agents.versions.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -30,7 +27,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: version = client.agents.versions.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -40,7 +37,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.versions.with_raw_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -52,7 +49,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.versions.with_streaming_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ 
-66,7 +63,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): client.agents.versions.with_raw_response.update( path_uuid="", @@ -74,7 +71,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: version = client.agents.versions.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -82,7 +79,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: version = client.agents.versions.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -92,7 +89,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.versions.with_raw_response.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -104,7 +101,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.versions.with_streaming_response.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -118,7 +115,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list(self, client: GradientAI) -> None: + def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.versions.with_raw_response.list( uuid="", @@ -132,7 +129,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -140,7 +137,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -150,7 +147,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.versions.with_raw_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -162,7 +159,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() 
@parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.versions.with_streaming_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -176,7 +173,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): await async_client.agents.versions.with_raw_response.update( path_uuid="", @@ -184,7 +181,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -192,7 +189,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -202,7 +199,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.versions.with_raw_response.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -214,7 +211,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.versions.with_streaming_response.list( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -228,7 +225,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.versions.with_raw_response.list( uuid="", diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 95b02106..a25fd3c4 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.chat import CompletionCreateResponse +from gradient.types.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestCompletions: 
@pytest.mark.skip() @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: Gradient) -> None: completion = client.chat.completions.create( messages=[ { @@ -33,7 +33,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: completion = client.chat.completions.create( messages=[ { @@ -73,7 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { @@ -91,7 +91,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { @@ -111,7 +111,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: + def test_method_create_overload_2(self, client: Gradient) -> None: completion_stream = client.chat.completions.create( messages=[ { @@ -126,7 +126,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: completion_stream = client.chat.completions.create( messages=[ { @@ -166,7 +166,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { @@ -184,7 +184,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { @@ -211,7 +211,7 @@ class TestAsyncCompletions: @pytest.mark.skip() @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: completion = await async_client.chat.completions.create( messages=[ { @@ -225,7 +225,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: completion = await async_client.chat.completions.create( messages=[ { @@ -265,7 +265,7 @@ async def test_method_create_with_all_params_overload_1(self, 
async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.chat.completions.with_raw_response.create( messages=[ { @@ -283,7 +283,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.chat.completions.with_streaming_response.create( messages=[ { @@ -303,7 +303,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.chat.completions.create( messages=[ { @@ -318,7 +318,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.chat.completions.create( messages=[ { @@ -358,7 +358,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.chat.completions.with_raw_response.create( messages=[ { @@ -376,7 +376,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.chat.completions.with_streaming_response.create( messages=[ { diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py index f63d62c0..024d8b0a 100644 --- a/tests/api_resources/databases/schema_registry/test_config.py +++ b/tests/api_resources/databases/schema_registry/test_config.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.databases.schema_registry import ( +from gradient.types.databases.schema_registry import ( ConfigUpdateResponse, ConfigRetrieveResponse, ConfigUpdateSubjectResponse, @@ -24,7 +24,7 @@ class TestConfig: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: config = client.databases.schema_registry.config.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -32,7 +32,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: 
Gradient) -> None: response = client.databases.schema_registry.config.with_raw_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -44,7 +44,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.databases.schema_registry.config.with_streaming_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -58,7 +58,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): client.databases.schema_registry.config.with_raw_response.retrieve( "", @@ -66,7 +66,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: config = client.databases.schema_registry.config.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -75,7 +75,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.databases.schema_registry.config.with_raw_response.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -88,7 +88,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.databases.schema_registry.config.with_streaming_response.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -103,7 +103,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): client.databases.schema_registry.config.with_raw_response.update( database_cluster_uuid="", @@ -112,7 +112,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve_subject(self, client: GradientAI) -> None: + def test_method_retrieve_subject(self, client: Gradient) -> None: config = client.databases.schema_registry.config.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -121,7 +121,7 @@ def test_method_retrieve_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve_subject(self, client: GradientAI) -> None: + def test_raw_response_retrieve_subject(self, client: Gradient) -> None: response = client.databases.schema_registry.config.with_raw_response.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -134,7 +134,7 @@ def 
test_raw_response_retrieve_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve_subject(self, client: GradientAI) -> None: + def test_streaming_response_retrieve_subject(self, client: Gradient) -> None: with client.databases.schema_registry.config.with_streaming_response.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -149,7 +149,7 @@ def test_streaming_response_retrieve_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve_subject(self, client: GradientAI) -> None: + def test_path_params_retrieve_subject(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): client.databases.schema_registry.config.with_raw_response.retrieve_subject( subject_name="customer-schema", @@ -164,7 +164,7 @@ def test_path_params_retrieve_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_subject(self, client: GradientAI) -> None: + def test_method_update_subject(self, client: Gradient) -> None: config = client.databases.schema_registry.config.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -174,7 +174,7 @@ def test_method_update_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update_subject(self, client: GradientAI) -> None: + def test_raw_response_update_subject(self, client: Gradient) -> None: response = client.databases.schema_registry.config.with_raw_response.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -188,7 +188,7 @@ def test_raw_response_update_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update_subject(self, client: GradientAI) -> None: + def test_streaming_response_update_subject(self, client: Gradient) -> None: with client.databases.schema_registry.config.with_streaming_response.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -204,7 +204,7 @@ def test_streaming_response_update_subject(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_subject(self, client: GradientAI) -> None: + def test_path_params_update_subject(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): client.databases.schema_registry.config.with_raw_response.update_subject( subject_name="customer-schema", @@ -227,7 +227,7 @@ class TestAsyncConfig: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: config = await async_client.databases.schema_registry.config.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -235,7 +235,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.databases.schema_registry.config.with_raw_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -247,7 
+247,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.databases.schema_registry.config.with_streaming_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -261,7 +261,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): await async_client.databases.schema_registry.config.with_raw_response.retrieve( "", @@ -269,7 +269,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: config = await async_client.databases.schema_registry.config.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -278,7 +278,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.databases.schema_registry.config.with_raw_response.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -291,7 +291,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.databases.schema_registry.config.with_streaming_response.update( database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", compatibility_level="BACKWARD", @@ -306,7 +306,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): await async_client.databases.schema_registry.config.with_raw_response.update( database_cluster_uuid="", @@ -315,7 +315,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve_subject(self, async_client: AsyncGradient) -> None: config = await async_client.databases.schema_registry.config.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -324,7 +324,7 @@ async def test_method_retrieve_subject(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve_subject(self, async_client: 
AsyncGradientAI) -> None: + async def test_raw_response_retrieve_subject(self, async_client: AsyncGradient) -> None: response = await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -337,7 +337,7 @@ async def test_raw_response_retrieve_subject(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradient) -> None: async with async_client.databases.schema_registry.config.with_streaming_response.retrieve_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -352,7 +352,7 @@ async def test_streaming_response_retrieve_subject(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_path_params_retrieve_subject(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve_subject(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject( subject_name="customer-schema", @@ -367,7 +367,7 @@ async def test_path_params_retrieve_subject(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_update_subject(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_subject(self, async_client: AsyncGradient) -> None: config = await async_client.databases.schema_registry.config.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -377,7 +377,7 @@ async def test_method_update_subject(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_raw_response_update_subject(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_subject(self, async_client: AsyncGradient) -> None: response = await async_client.databases.schema_registry.config.with_raw_response.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -391,7 +391,7 @@ async def test_raw_response_update_subject(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_streaming_response_update_subject(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_subject(self, async_client: AsyncGradient) -> None: async with async_client.databases.schema_registry.config.with_streaming_response.update_subject( subject_name="customer-schema", database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30", @@ -407,7 +407,7 @@ async def test_streaming_response_update_subject(self, async_client: AsyncGradie @pytest.mark.skip() @parametrize - async def test_path_params_update_subject(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_subject(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"): await async_client.databases.schema_registry.config.with_raw_response.update_subject( subject_name="customer-schema", diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py index 
cf168f61..5a63c275 100644 --- a/tests/api_resources/gpu_droplets/account/test_keys.py +++ b/tests/api_resources/gpu_droplets/account/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets.account import ( +from gradient.types.gpu_droplets.account import ( KeyListResponse, KeyCreateResponse, KeyUpdateResponse, @@ -24,7 +24,7 @@ class TestKeys: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.account.keys.with_raw_response.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -46,7 +46,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.account.keys.with_streaming_response.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.retrieve( 512189, ) @@ -69,7 +69,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.account.keys.with_raw_response.retrieve( 512189, ) @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.account.keys.with_streaming_response.retrieve( 512189, ) as response: @@ -95,7 +95,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.update( ssh_key_identifier=512189, ) @@ -103,7 +103,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.update( ssh_key_identifier=512189, name="My SSH Public Key", @@ -112,7 +112,7 @@ 
def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.gpu_droplets.account.keys.with_raw_response.update( ssh_key_identifier=512189, ) @@ -124,7 +124,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.gpu_droplets.account.keys.with_streaming_response.update( ssh_key_identifier=512189, ) as response: @@ -138,13 +138,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.list( page=1, per_page=1, @@ -153,7 +153,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.account.keys.with_raw_response.list() assert response.is_closed is True @@ -163,7 +163,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.account.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -175,7 +175,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: key = client.gpu_droplets.account.keys.delete( 512189, ) @@ -183,7 +183,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.account.keys.with_raw_response.delete( 512189, ) @@ -195,7 +195,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.account.keys.with_streaming_response.delete( 512189, ) as response: @@ -215,7 +215,7 @@ class TestAsyncKeys: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -224,7 +224,7 @@ async def 
test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -237,7 +237,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.create( name="My SSH Public Key", public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", @@ -252,7 +252,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.retrieve( 512189, ) @@ -260,7 +260,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.retrieve( 512189, ) @@ -272,7 +272,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.retrieve( 512189, ) as response: @@ -286,7 +286,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.update( ssh_key_identifier=512189, ) @@ -294,7 +294,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.update( ssh_key_identifier=512189, name="My SSH Public Key", @@ -303,7 +303,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.update( ssh_key_identifier=512189, ) @@ -315,7 +315,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) 
-> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.update( ssh_key_identifier=512189, ) as response: @@ -329,13 +329,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.list( page=1, per_page=1, @@ -344,7 +344,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.list() assert response.is_closed is True @@ -354,7 +354,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -366,7 +366,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.delete( 512189, ) @@ -374,7 +374,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.delete( 512189, ) @@ -386,7 +386,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.delete( 512189, ) as response: diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py index 819a5e6e..8f39a064 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from do_gradientai import GradientAI, AsyncGradientAI +from gradient import Gradient, AsyncGradient base_url = 
os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,7 +17,7 @@ class TestDroplets: @pytest.mark.skip() @parametrize - def test_method_add(self, client: GradientAI) -> None: + def test_method_add(self, client: Gradient) -> None: droplet = client.gpu_droplets.firewalls.droplets.add( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -26,7 +26,7 @@ def test_method_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: + def test_raw_response_add(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.droplets.with_raw_response.add( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -39,7 +39,7 @@ def test_raw_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: + def test_streaming_response_add(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.droplets.with_streaming_response.add( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -54,7 +54,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_add(self, client: GradientAI) -> None: + def test_path_params_add(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): client.gpu_droplets.firewalls.droplets.with_raw_response.add( firewall_id="", @@ -63,7 +63,7 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_remove(self, client: GradientAI) -> None: + def test_method_remove(self, client: Gradient) -> None: droplet = client.gpu_droplets.firewalls.droplets.remove( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -72,7 +72,7 @@ def test_method_remove(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: + def test_raw_response_remove(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.droplets.with_raw_response.remove( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -85,7 +85,7 @@ def test_raw_response_remove(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: + def test_streaming_response_remove(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.droplets.with_streaming_response.remove( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -100,7 +100,7 @@ def test_streaming_response_remove(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: + def test_path_params_remove(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): client.gpu_droplets.firewalls.droplets.with_raw_response.remove( firewall_id="", @@ -115,7 +115,7 @@ class TestAsyncDroplets: @pytest.mark.skip() @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: + async def test_method_add(self, async_client: AsyncGradient) -> None: droplet = await async_client.gpu_droplets.firewalls.droplets.add( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", droplet_ids=[49696269], @@ -124,7 +124,7 @@ 
async def test_method_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             droplet_ids=[49696269],
@@ -137,7 +137,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             droplet_ids=[49696269],
@@ -152,7 +152,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add(
                 firewall_id="",
@@ -161,7 +161,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove(self, async_client: AsyncGradient) -> None:
         droplet = await async_client.gpu_droplets.firewalls.droplets.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             droplet_ids=[49696269],
@@ -170,7 +170,7 @@ async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             droplet_ids=[49696269],
@@ -183,7 +183,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             droplet_ids=[49696269],
@@ -198,7 +198,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
                 firewall_id="",
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
index b2eab40c..2bd74228 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradient import Gradient, AsyncGradient
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -17,7 +17,7 @@ class TestRules:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_add(self, client: GradientAI) -> None:
+    def test_method_add(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -25,7 +25,7 @@ def test_method_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_add_with_all_params(self, client: GradientAI) -> None:
+    def test_method_add_with_all_params(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             inbound_rules=[
@@ -59,7 +59,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_add(self, client: GradientAI) -> None:
+    def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.rules.with_raw_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -71,7 +71,7 @@ def test_raw_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_add(self, client: GradientAI) -> None:
+    def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.rules.with_streaming_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
@@ -85,7 +85,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_add(self, client: GradientAI) -> None:
+    def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             client.gpu_droplets.firewalls.rules.with_raw_response.add(
                 firewall_id="",
@@ -93,7 +93,7 @@ def test_path_params_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_remove(self, client: GradientAI) -> None:
+    def test_method_remove(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -101,7 +101,7 @@ def test_method_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_remove_with_all_params(self, client: GradientAI) -> None:
+    def test_method_remove_with_all_params(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             inbound_rules=[
@@ -135,7 +135,7 @@ def test_method_remove_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_remove(self, client: GradientAI) -> None:
+    def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.rules.with_raw_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -147,7 +147,7 @@ def test_raw_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_remove(self, client: GradientAI) -> None:
+    def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.rules.with_streaming_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
@@ -161,7 +161,7 @@ def test_streaming_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_remove(self, client: GradientAI) -> None:
+    def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             client.gpu_droplets.firewalls.rules.with_raw_response.remove(
                 firewall_id="",
@@ -175,7 +175,7 @@ class TestAsyncRules:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_add(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -183,7 +183,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             inbound_rules=[
@@ -217,7 +217,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -229,7 +229,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
@@ -243,7 +243,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.rules.with_raw_response.add(
                 firewall_id="",
@@ -251,7 +251,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -259,7 +259,7 @@ async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove_with_all_params(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             inbound_rules=[
@@ -293,7 +293,7 @@ async def test_method_remove_with_all_params(self, async_client: AsyncGradientAI
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         )
@@ -305,7 +305,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
@@ -319,7 +319,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove(
                 firewall_id="",
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
index 25c9362b..cbd86f65 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradient import Gradient, AsyncGradient
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -17,7 +17,7 @@ class TestTags:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_add(self, client: GradientAI) -> None:
+    def test_method_add(self, client: Gradient) -> None:
         tag = client.gpu_droplets.firewalls.tags.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -26,7 +26,7 @@ def test_method_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_add(self, client: GradientAI) -> None:
+    def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.tags.with_raw_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -39,7 +39,7 @@ def test_raw_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_add(self, client: GradientAI) -> None:
+    def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.tags.with_streaming_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -54,7 +54,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_add(self, client: GradientAI) -> None:
+    def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             client.gpu_droplets.firewalls.tags.with_raw_response.add(
                 firewall_id="",
@@ -63,7 +63,7 @@ def test_path_params_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_remove(self, client: GradientAI) -> None:
+    def test_method_remove(self, client: Gradient) -> None:
         tag = client.gpu_droplets.firewalls.tags.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -72,7 +72,7 @@ def test_method_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_remove(self, client: GradientAI) -> None:
+    def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.tags.with_raw_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -85,7 +85,7 @@ def test_raw_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_remove(self, client: GradientAI) -> None:
+    def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.tags.with_streaming_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -100,7 +100,7 @@ def test_streaming_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_remove(self, client: GradientAI) -> None:
+    def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             client.gpu_droplets.firewalls.tags.with_raw_response.remove(
                 firewall_id="",
@@ -115,7 +115,7 @@ class TestAsyncTags:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_add(self, async_client: AsyncGradient) -> None:
         tag = await async_client.gpu_droplets.firewalls.tags.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -124,7 +124,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -137,7 +137,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.add(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -152,7 +152,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.tags.with_raw_response.add(
                 firewall_id="",
@@ -161,7 +161,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove(self, async_client: AsyncGradient) -> None:
         tag = await async_client.gpu_droplets.firewalls.tags.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -170,7 +170,7 @@ async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -183,7 +183,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.remove(
             firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c",
             tags=["frontend"],
@@ -198,7 +198,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
             await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove(
                 firewall_id="",
diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
index ad26db8a..9417a880 100644
--- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
+++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.floating_ips import (
+from gradient.types.gpu_droplets.floating_ips import (
     ActionListResponse,
     ActionCreateResponse,
     ActionRetrieveResponse,
@@ -23,7 +23,7 @@ class TestActions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.floating_ips.actions.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -32,7 +32,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.actions.with_raw_response.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -45,7 +45,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -60,7 +60,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_create_overload_1(self, client: GradientAI) -> None:
+    def test_path_params_create_overload_1(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             client.gpu_droplets.floating_ips.actions.with_raw_response.create(
                 floating_ip="",
@@ -69,7 +69,7 @@ def test_path_params_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.floating_ips.actions.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -79,7 +79,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.actions.with_raw_response.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -93,7 +93,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -109,7 +109,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_create_overload_2(self, client: GradientAI) -> None:
+    def test_path_params_create_overload_2(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             client.gpu_droplets.floating_ips.actions.with_raw_response.create(
                 floating_ip="",
@@ -119,7 +119,7 @@ def test_path_params_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         action = client.gpu_droplets.floating_ips.actions.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -128,7 +128,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -141,7 +141,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -156,7 +156,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
                 action_id=36804636,
@@ -165,7 +165,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.floating_ips.actions.list(
             "192.168.1.1",
         )
@@ -173,7 +173,7 @@ def test_method_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.actions.with_raw_response.list(
             "192.168.1.1",
         )
@@ -185,7 +185,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
             "192.168.1.1",
         ) as response:
@@ -199,7 +199,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_list(self, client: GradientAI) -> None:
+    def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             client.gpu_droplets.floating_ips.actions.with_raw_response.list(
                 "",
@@ -213,7 +213,7 @@ class TestAsyncActions:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.floating_ips.actions.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -222,7 +222,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -235,7 +235,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
             floating_ip="45.55.96.47",
             type="assign",
@@ -250,7 +250,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_create_overload_1(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
                 floating_ip="",
@@ -259,7 +259,7 @@ async def test_path_params_create_overload_1(self, async_client: AsyncGradientAI
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.floating_ips.actions.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -269,7 +269,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -283,7 +283,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create(
             floating_ip="45.55.96.47",
             droplet_id=758604968,
@@ -299,7 +299,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_create_overload_2(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create(
                 floating_ip="",
@@ -309,7 +309,7 @@ async def test_path_params_create_overload_2(self, async_client: AsyncGradientAI
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.floating_ips.actions.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -318,7 +318,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -331,7 +331,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve(
             action_id=36804636,
             floating_ip="45.55.96.47",
@@ -346,7 +346,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve(
                 action_id=36804636,
@@ -355,7 +355,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.floating_ips.actions.list(
             "192.168.1.1",
         )
@@ -363,7 +363,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list(
             "192.168.1.1",
         )
@@ -375,7 +375,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
             "192.168.1.1",
         ) as response:
@@ -389,7 +389,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_list(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
             await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list(
                 "",
diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py
index 35861bcb..f59e3986 100644
--- a/tests/api_resources/gpu_droplets/images/test_actions.py
+++ b/tests/api_resources/gpu_droplets/images/test_actions.py
@@ -7,10 +7,10 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.shared import Action
-from do_gradientai.types.gpu_droplets.images import ActionListResponse
+from gradient.types.shared import Action
+from gradient.types.gpu_droplets.images import ActionListResponse
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -20,7 +20,7 @@ class TestActions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.images.actions.create(
             image_id=62137902,
             type="convert",
@@ -29,7 +29,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.actions.with_raw_response.create(
             image_id=62137902,
             type="convert",
@@ -42,7 +42,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.images.actions.with_streaming_response.create(
             image_id=62137902,
             type="convert",
@@ -57,7 +57,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.images.actions.create(
             image_id=62137902,
             region="nyc3",
@@ -67,7 +67,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.actions.with_raw_response.create(
             image_id=62137902,
             region="nyc3",
@@ -81,7 +81,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.images.actions.with_streaming_response.create(
             image_id=62137902,
             region="nyc3",
@@ -97,7 +97,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         action = client.gpu_droplets.images.actions.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -106,7 +106,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.actions.with_raw_response.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -119,7 +119,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.images.actions.with_streaming_response.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -134,7 +134,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.images.actions.list(
             0,
         )
@@ -142,7 +142,7 @@ def test_method_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.actions.with_raw_response.list(
             0,
         )
@@ -154,7 +154,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.images.actions.with_streaming_response.list(
             0,
         ) as response:
@@ -174,7 +174,7 @@ class TestAsyncActions:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.images.actions.create(
             image_id=62137902,
             type="convert",
@@ -183,7 +183,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.actions.with_raw_response.create(
             image_id=62137902,
             type="convert",
@@ -196,7 +196,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.actions.with_streaming_response.create(
             image_id=62137902,
             type="convert",
@@ -211,7 +211,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.images.actions.create(
             image_id=62137902,
             region="nyc3",
@@ -221,7 +221,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.actions.with_raw_response.create(
             image_id=62137902,
             region="nyc3",
@@ -235,7 +235,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.actions.with_streaming_response.create(
             image_id=62137902,
             region="nyc3",
@@ -251,7 +251,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.images.actions.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -260,7 +260,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.actions.with_raw_response.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -273,7 +273,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.actions.with_streaming_response.retrieve(
             action_id=36804636,
             image_id=62137902,
@@ -288,7 +288,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.images.actions.list(
             0,
         )
@@ -296,7 +296,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.actions.with_raw_response.list(
             0,
         )
@@ -308,7 +308,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.actions.with_streaming_response.list(
             0,
         ) as response:
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
index f22213e2..200dad39 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradient import Gradient, AsyncGradient
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -17,7 +17,7 @@ class TestDroplets:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_add(self, client: GradientAI) -> None:
+    def test_method_add(self, client: Gradient) -> None:
         droplet = client.gpu_droplets.load_balancers.droplets.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -26,7 +26,7 @@ def test_method_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_add(self, client: GradientAI) -> None:
+    def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -39,7 +39,7 @@ def test_raw_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_add(self, client: GradientAI) -> None:
+    def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.droplets.with_streaming_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -54,7 +54,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_add(self, client: GradientAI) -> None:
+    def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
                 lb_id="",
@@ -63,7 +63,7 @@ def test_path_params_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_remove(self, client: GradientAI) -> None:
+    def test_method_remove(self, client: Gradient) -> None:
         droplet = client.gpu_droplets.load_balancers.droplets.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -72,7 +72,7 @@ def test_method_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_remove(self, client: GradientAI) -> None:
+    def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -85,7 +85,7 @@ def test_raw_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_remove(self, client: GradientAI) -> None:
+    def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -100,7 +100,7 @@ def test_streaming_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_remove(self, client: GradientAI) -> None:
+    def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
                 lb_id="",
@@ -115,7 +115,7 @@ class TestAsyncDroplets:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_add(self, async_client: AsyncGradient) -> None:
         droplet = await async_client.gpu_droplets.load_balancers.droplets.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -124,7 +124,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -137,7 +137,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -152,7 +152,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add(
                 lb_id="",
@@ -161,7 +161,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove(self, async_client: AsyncGradient) -> None:
         droplet = await async_client.gpu_droplets.load_balancers.droplets.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -170,7 +170,7 @@ async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -183,7 +183,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             droplet_ids=[3164444, 3164445],
@@ -198,7 +198,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove(
                 lb_id="",
diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
index d53bd0db..4f1decdf 100644
--- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
+++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from do_gradientai import GradientAI, AsyncGradientAI
+from gradient import Gradient, AsyncGradient
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -17,7 +17,7 @@ class TestForwardingRules:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_add(self, client: GradientAI) -> None:
+    def test_method_add(self, client: Gradient) -> None:
         forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -33,7 +33,7 @@ def test_method_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_add(self, client: GradientAI) -> None:
+    def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -53,7 +53,7 @@ def test_raw_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_add(self, client: GradientAI) -> None:
+    def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -75,7 +75,7 @@ def test_streaming_response_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_add(self, client: GradientAI) -> None:
+    def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
                 lb_id="",
@@ -91,7 +91,7 @@ def test_path_params_add(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_remove(self, client: GradientAI) -> None:
+    def test_method_remove(self, client: Gradient) -> None:
         forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -107,7 +107,7 @@ def test_method_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_remove(self, client: GradientAI) -> None:
+    def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -127,7 +127,7 @@ def test_raw_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_remove(self, client: GradientAI) -> None:
+    def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -149,7 +149,7 @@ def test_streaming_response_remove(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_remove(self, client: GradientAI) -> None:
+    def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
                 lb_id="",
@@ -171,7 +171,7 @@ class TestAsyncForwardingRules:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_add(self, async_client: AsyncGradient) -> None:
         forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -187,7 +187,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -207,7 +207,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -229,7 +229,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> No
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add(
                 lb_id="",
@@ -245,7 +245,7 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_remove(self, async_client: AsyncGradient) -> None:
         forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -261,7 +261,7 @@ async def test_method_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -281,7 +281,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove(
             lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6",
             forwarding_rules=[
@@ -303,7 +303,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
             await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove(
                 lb_id="",
diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py
index 74e45b44..7a52c608 100644
--- a/tests/api_resources/gpu_droplets/test_actions.py
+++ b/tests/api_resources/gpu_droplets/test_actions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     ActionListResponse,
     ActionInitiateResponse,
     ActionRetrieveResponse,
@@ -24,7 +24,7 @@ class TestActions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.retrieve(
             action_id=36804636,
             droplet_id=3164444,
@@ -33,7 +33,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.retrieve(
             action_id=36804636,
             droplet_id=3164444,
@@ -46,7 +46,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.retrieve(
             action_id=36804636,
             droplet_id=3164444,
@@ -61,7 +61,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.list(
             droplet_id=3164444,
         )
@@ -69,7 +69,7 @@ def test_method_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.list(
             droplet_id=3164444,
             page=1,
@@ -79,7 +79,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.list(
             droplet_id=3164444,
         )
@@ -91,7 +91,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.list(
             droplet_id=3164444,
         ) as response:
@@ -105,7 +105,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_bulk_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_method_bulk_initiate_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.bulk_initiate(
             type="reboot",
         )
@@ -113,7 +113,7 @@ def test_method_bulk_initiate_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_bulk_initiate_with_all_params_overload_1(self, client: GradientAI) -> None:
+    def test_method_bulk_initiate_with_all_params_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.bulk_initiate(
             type="reboot",
             tag_name="tag_name",
@@ -122,7 +122,7 @@ def test_method_bulk_initiate_with_all_params_overload_1(self, client: GradientA
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_bulk_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_bulk_initiate_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.bulk_initiate(
             type="reboot",
         )
@@ -134,7 +134,7 @@ def test_raw_response_bulk_initiate_overload_1(self, client: GradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_bulk_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_bulk_initiate_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
             type="reboot",
         ) as response:
@@ -148,7 +148,7 @@ def test_streaming_response_bulk_initiate_overload_1(self, client: GradientAI) -
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_bulk_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_method_bulk_initiate_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.bulk_initiate(
             type="reboot",
         )
@@ -156,7 +156,7 @@ def test_method_bulk_initiate_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_bulk_initiate_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_bulk_initiate_with_all_params_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.bulk_initiate(
             type="reboot",
             tag_name="tag_name",
@@ -166,7 +166,7 @@ def test_method_bulk_initiate_with_all_params_overload_2(self, client: GradientA
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_bulk_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_bulk_initiate_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.bulk_initiate(
             type="reboot",
         )
@@ -178,7 +178,7 @@ def test_raw_response_bulk_initiate_overload_2(self, client: GradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_bulk_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_bulk_initiate_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.bulk_initiate(
             type="reboot",
         ) as response:
@@ -192,7 +192,7 @@ def test_streaming_response_bulk_initiate_overload_2(self, client: GradientAI) -
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_method_initiate_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -201,7 +201,7 @@ def test_method_initiate_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -214,7 +214,7 @@ def test_raw_response_initiate_overload_1(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -229,7 +229,7 @@ def test_streaming_response_initiate_overload_1(self, client: GradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -238,7 +238,7 @@ def test_method_initiate_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_with_all_params_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -252,7 +252,7 @@ def test_method_initiate_with_all_params_overload_2(self, client: GradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -265,7 +265,7 @@ def test_raw_response_initiate_overload_2(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -280,7 +280,7 @@ def test_streaming_response_initiate_overload_2(self, client: GradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_overload_3(self, client: GradientAI) -> None:
+    def test_method_initiate_overload_3(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -289,7 +289,7 @@ def test_method_initiate_overload_3(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_with_all_params_overload_3(self, client: GradientAI) -> None:
+    def test_method_initiate_with_all_params_overload_3(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -303,7 +303,7 @@ def test_method_initiate_with_all_params_overload_3(self, client: GradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_overload_3(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_overload_3(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -316,7 +316,7 @@ def test_raw_response_initiate_overload_3(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_overload_3(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_overload_3(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.initiate(
             droplet_id=3164444,
             type="enable_backups",
@@ -331,7 +331,7 @@ def test_streaming_response_initiate_overload_3(self, client: GradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_overload_4(self, client: GradientAI) -> None:
+    def test_method_initiate_overload_4(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -340,7 +340,7 @@ def test_method_initiate_overload_4(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_with_all_params_overload_4(self, client: GradientAI) -> None:
+    def test_method_initiate_with_all_params_overload_4(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -350,7 +350,7 @@ def test_method_initiate_with_all_params_overload_4(self, client: GradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_overload_4(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_overload_4(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -363,7 +363,7 @@ def test_raw_response_initiate_overload_4(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_overload_4(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_overload_4(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -378,7 +378,7 @@ def test_streaming_response_initiate_overload_4(self, client: GradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_overload_5(self, client: GradientAI) -> None:
+    def test_method_initiate_overload_5(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -387,7 +387,7 @@ def test_method_initiate_overload_5(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_with_all_params_overload_5(self, client: GradientAI) -> None:
+    def test_method_initiate_with_all_params_overload_5(self, client: Gradient) -> None:
         action = client.gpu_droplets.actions.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -398,7 +398,7 @@ def test_method_initiate_with_all_params_overload_5(self, client: GradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_overload_5(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_overload_5(self, client: Gradient) -> None:
         response = client.gpu_droplets.actions.with_raw_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -411,7 +411,7 @@ def test_raw_response_initiate_overload_5(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_overload_5(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_overload_5(self, client: Gradient) -> None:
         with client.gpu_droplets.actions.with_streaming_response.initiate(
             droplet_id=3164444,
             type="reboot",
@@ -426,7 +426,7 @@ def test_streaming_response_initiate_overload_5(self, client: GradientAI) -> Non
@pytest.mark.skip() @parametrize - def test_method_initiate_overload_6(self, client: GradientAI) -> None: + def test_method_initiate_overload_6(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -435,7 +435,7 @@ def test_method_initiate_overload_6(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_initiate_with_all_params_overload_6(self, client: GradientAI) -> None: + def test_method_initiate_with_all_params_overload_6(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -445,7 +445,7 @@ def test_method_initiate_with_all_params_overload_6(self, client: GradientAI) -> @pytest.mark.skip() @parametrize - def test_raw_response_initiate_overload_6(self, client: GradientAI) -> None: + def test_raw_response_initiate_overload_6(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -458,7 +458,7 @@ def test_raw_response_initiate_overload_6(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_initiate_overload_6(self, client: GradientAI) -> None: + def test_streaming_response_initiate_overload_6(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -473,7 +473,7 @@ def test_streaming_response_initiate_overload_6(self, client: GradientAI) -> Non @pytest.mark.skip() @parametrize - def test_method_initiate_overload_7(self, client: GradientAI) -> None: + def test_method_initiate_overload_7(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -482,7 +482,7 @@ def test_method_initiate_overload_7(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_initiate_with_all_params_overload_7(self, client: GradientAI) -> None: + def test_method_initiate_with_all_params_overload_7(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -492,7 +492,7 @@ def test_method_initiate_with_all_params_overload_7(self, client: GradientAI) -> @pytest.mark.skip() @parametrize - def test_raw_response_initiate_overload_7(self, client: GradientAI) -> None: + def test_raw_response_initiate_overload_7(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -505,7 +505,7 @@ def test_raw_response_initiate_overload_7(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_initiate_overload_7(self, client: GradientAI) -> None: + def test_streaming_response_initiate_overload_7(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -520,7 +520,7 @@ def test_streaming_response_initiate_overload_7(self, client: GradientAI) -> Non @pytest.mark.skip() @parametrize - def test_method_initiate_overload_8(self, client: GradientAI) -> None: + def test_method_initiate_overload_8(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -529,7 +529,7 @@ def test_method_initiate_overload_8(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_initiate_with_all_params_overload_8(self, client: GradientAI) -> None: + def 
test_method_initiate_with_all_params_overload_8(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -539,7 +539,7 @@ def test_method_initiate_with_all_params_overload_8(self, client: GradientAI) -> @pytest.mark.skip() @parametrize - def test_raw_response_initiate_overload_8(self, client: GradientAI) -> None: + def test_raw_response_initiate_overload_8(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -552,7 +552,7 @@ def test_raw_response_initiate_overload_8(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_initiate_overload_8(self, client: GradientAI) -> None: + def test_streaming_response_initiate_overload_8(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -567,7 +567,7 @@ def test_streaming_response_initiate_overload_8(self, client: GradientAI) -> Non @pytest.mark.skip() @parametrize - def test_method_initiate_overload_9(self, client: GradientAI) -> None: + def test_method_initiate_overload_9(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -576,7 +576,7 @@ def test_method_initiate_overload_9(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_initiate_with_all_params_overload_9(self, client: GradientAI) -> None: + def test_method_initiate_with_all_params_overload_9(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -586,7 +586,7 @@ def test_method_initiate_with_all_params_overload_9(self, client: GradientAI) -> @pytest.mark.skip() @parametrize - def test_raw_response_initiate_overload_9(self, client: GradientAI) -> None: + def test_raw_response_initiate_overload_9(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -599,7 +599,7 @@ def test_raw_response_initiate_overload_9(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_initiate_overload_9(self, client: GradientAI) -> None: + def test_streaming_response_initiate_overload_9(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -620,7 +620,7 @@ class TestAsyncActions: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.retrieve( action_id=36804636, droplet_id=3164444, @@ -629,7 +629,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.retrieve( action_id=36804636, droplet_id=3164444, @@ -642,7 +642,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: 
async with async_client.gpu_droplets.actions.with_streaming_response.retrieve( action_id=36804636, droplet_id=3164444, @@ -657,7 +657,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.list( droplet_id=3164444, ) @@ -665,7 +665,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.list( droplet_id=3164444, page=1, @@ -675,7 +675,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.list( droplet_id=3164444, ) @@ -687,7 +687,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.list( droplet_id=3164444, ) as response: @@ -701,7 +701,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( type="reboot", ) @@ -709,7 +709,7 @@ async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( type="reboot", tag_name="tag_name", @@ -718,7 +718,7 @@ async def test_method_bulk_initiate_with_all_params_overload_1(self, async_clien @pytest.mark.skip() @parametrize - async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( type="reboot", ) @@ -730,7 +730,7 @@ async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGr @pytest.mark.skip() @parametrize - async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( type="reboot", ) as response: @@ -744,7 +744,7 @@ async def test_streaming_response_bulk_initiate_overload_1(self, async_client: A @pytest.mark.skip() 
@parametrize - async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( type="reboot", ) @@ -752,7 +752,7 @@ async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( type="reboot", tag_name="tag_name", @@ -762,7 +762,7 @@ async def test_method_bulk_initiate_with_all_params_overload_2(self, async_clien @pytest.mark.skip() @parametrize - async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( type="reboot", ) @@ -774,7 +774,7 @@ async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGr @pytest.mark.skip() @parametrize - async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( type="reboot", ) as response: @@ -788,7 +788,7 @@ async def test_streaming_response_bulk_initiate_overload_2(self, async_client: A @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -797,7 +797,7 @@ async def test_method_initiate_overload_1(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -810,7 +810,7 @@ async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -825,7 +825,7 @@ async def test_streaming_response_initiate_overload_1(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="enable_backups", @@ -834,7 +834,7 @@ async def test_method_initiate_overload_2(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_2(self, async_client: 
AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="enable_backups", @@ -848,7 +848,7 @@ async def test_method_initiate_with_all_params_overload_2(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="enable_backups", @@ -861,7 +861,7 @@ async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="enable_backups", @@ -876,7 +876,7 @@ async def test_streaming_response_initiate_overload_2(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="enable_backups", @@ -885,7 +885,7 @@ async def test_method_initiate_overload_3(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="enable_backups", @@ -899,7 +899,7 @@ async def test_method_initiate_with_all_params_overload_3(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="enable_backups", @@ -912,7 +912,7 @@ async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="enable_backups", @@ -927,7 +927,7 @@ async def test_streaming_response_initiate_overload_3(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_4(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -936,7 +936,7 @@ async def test_method_initiate_overload_4(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradientAI) -> None: + async 
def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -946,7 +946,7 @@ async def test_method_initiate_with_all_params_overload_4(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -959,7 +959,7 @@ async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -974,7 +974,7 @@ async def test_streaming_response_initiate_overload_4(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_5(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -983,7 +983,7 @@ async def test_method_initiate_overload_5(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -994,7 +994,7 @@ async def test_method_initiate_with_all_params_overload_5(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -1007,7 +1007,7 @@ async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -1022,7 +1022,7 @@ async def test_streaming_response_initiate_overload_5(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_6(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1031,7 +1031,7 @@ async def test_method_initiate_overload_6(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_6(self, async_client: 
AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1041,7 +1041,7 @@ async def test_method_initiate_with_all_params_overload_6(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -1054,7 +1054,7 @@ async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -1069,7 +1069,7 @@ async def test_streaming_response_initiate_overload_6(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_7(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1078,7 +1078,7 @@ async def test_method_initiate_overload_7(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1088,7 +1088,7 @@ async def test_method_initiate_with_all_params_overload_7(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -1101,7 +1101,7 @@ async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -1116,7 +1116,7 @@ async def test_streaming_response_initiate_overload_7(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_8(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1125,7 +1125,7 @@ async def test_method_initiate_overload_8(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradient) -> None: action = await 
async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1135,7 +1135,7 @@ async def test_method_initiate_with_all_params_overload_8(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -1148,7 +1148,7 @@ async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", @@ -1163,7 +1163,7 @@ async def test_streaming_response_initiate_overload_8(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_method_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_overload_9(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1172,7 +1172,7 @@ async def test_method_initiate_overload_9(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradientAI) -> None: + async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( droplet_id=3164444, type="reboot", @@ -1182,7 +1182,7 @@ async def test_method_initiate_with_all_params_overload_9(self, async_client: As @pytest.mark.skip() @parametrize - async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( droplet_id=3164444, type="reboot", @@ -1195,7 +1195,7 @@ async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( droplet_id=3164444, type="reboot", diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py index cec0371d..16be3e00 100644 --- a/tests/api_resources/gpu_droplets/test_autoscale.py +++ b/tests/api_resources/gpu_droplets/test_autoscale.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( AutoscaleListResponse, AutoscaleCreateResponse, AutoscaleUpdateResponse, @@ -26,7 +26,7 @@ class TestAutoscale: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.create( 
config={ "max_instances": 5, @@ -44,7 +44,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.create( config={ "max_instances": 5, @@ -72,7 +72,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.create( config={ "max_instances": 5, @@ -94,7 +94,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.create( config={ "max_instances": 5, @@ -118,7 +118,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.retrieve( "autoscale_pool_id", ) @@ -126,7 +126,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.retrieve( "autoscale_pool_id", ) @@ -138,7 +138,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.retrieve( "autoscale_pool_id", ) as response: @@ -152,7 +152,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.retrieve( "", @@ -160,7 +160,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -176,7 +176,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -199,7 +199,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, 
client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -219,7 +219,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -241,7 +241,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.update( autoscale_pool_id="", @@ -257,13 +257,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list( name="name", page=1, @@ -273,7 +273,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list() assert response.is_closed is True @@ -283,7 +283,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -295,7 +295,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.delete( "autoscale_pool_id", ) @@ -303,7 +303,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.delete( "autoscale_pool_id", ) @@ -315,7 +315,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.delete( "autoscale_pool_id", ) as response: @@ -329,7 +329,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() 
@parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.delete( "", @@ -337,7 +337,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete_dangerous(self, client: GradientAI) -> None: + def test_method_delete_dangerous(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -346,7 +346,7 @@ def test_method_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: + def test_raw_response_delete_dangerous(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -359,7 +359,7 @@ def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: + def test_streaming_response_delete_dangerous(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -374,7 +374,7 @@ def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete_dangerous(self, client: GradientAI) -> None: + def test_path_params_delete_dangerous(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( autoscale_pool_id="", @@ -383,7 +383,7 @@ def test_path_params_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_history(self, client: GradientAI) -> None: + def test_method_list_history(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -391,7 +391,7 @@ def test_method_list_history(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_history_with_all_params(self, client: GradientAI) -> None: + def test_method_list_history_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", page=1, @@ -401,7 +401,7 @@ def test_method_list_history_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_history(self, client: GradientAI) -> None: + def test_raw_response_list_history(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -413,7 +413,7 @@ def test_raw_response_list_history(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_history(self, client: GradientAI) -> None: + def test_streaming_response_list_history(self, client: Gradient) -> None: with 
client.gpu_droplets.autoscale.with_streaming_response.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) as response: @@ -427,7 +427,7 @@ def test_streaming_response_list_history(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_history(self, client: GradientAI) -> None: + def test_path_params_list_history(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.list_history( autoscale_pool_id="", @@ -435,7 +435,7 @@ def test_path_params_list_history(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_members(self, client: GradientAI) -> None: + def test_method_list_members(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -443,7 +443,7 @@ def test_method_list_members(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_members_with_all_params(self, client: GradientAI) -> None: + def test_method_list_members_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", page=1, @@ -453,7 +453,7 @@ def test_method_list_members_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_members(self, client: GradientAI) -> None: + def test_raw_response_list_members(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -465,7 +465,7 @@ def test_raw_response_list_members(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_members(self, client: GradientAI) -> None: + def test_streaming_response_list_members(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) as response: @@ -479,7 +479,7 @@ def test_streaming_response_list_members(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_members(self, client: GradientAI) -> None: + def test_path_params_list_members(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): client.gpu_droplets.autoscale.with_raw_response.list_members( autoscale_pool_id="", @@ -493,7 +493,7 @@ class TestAsyncAutoscale: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.create( config={ "max_instances": 5, @@ -511,7 +511,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.create( config={ "max_instances": 5, @@ -539,7 +539,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def 
test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.create( config={ "max_instances": 5, @@ -561,7 +561,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.create( config={ "max_instances": 5, @@ -585,7 +585,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.retrieve( "autoscale_pool_id", ) @@ -593,7 +593,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( "autoscale_pool_id", ) @@ -605,7 +605,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve( "autoscale_pool_id", ) as response: @@ -619,7 +619,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( "", @@ -627,7 +627,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -643,7 +643,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -666,7 +666,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, 
async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -686,7 +686,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.update( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", config={"target_number_instances": 2}, @@ -708,7 +708,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.update( autoscale_pool_id="", @@ -724,13 +724,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list( name="name", page=1, @@ -740,7 +740,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list() assert response.is_closed is True @@ -750,7 +750,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -762,7 +762,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.delete( "autoscale_pool_id", ) @@ -770,7 +770,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.delete( 
"autoscale_pool_id", ) @@ -782,7 +782,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.delete( "autoscale_pool_id", ) as response: @@ -796,7 +796,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.delete( "", @@ -804,7 +804,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -813,7 +813,7 @@ async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -826,7 +826,7 @@ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", x_dangerous=True, @@ -841,7 +841,7 @@ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_path_params_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete_dangerous(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( autoscale_pool_id="", @@ -850,7 +850,7 @@ async def test_path_params_delete_dangerous(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_list_history(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_history(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -858,7 +858,7 @@ async def test_method_list_history(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_history_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def 
test_method_list_history_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", page=1, @@ -868,7 +868,7 @@ async def test_method_list_history_with_all_params(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_raw_response_list_history(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_history(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -880,7 +880,7 @@ async def test_raw_response_list_history(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_history(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_history(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list_history( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) as response: @@ -894,7 +894,7 @@ async def test_streaming_response_list_history(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_path_params_list_history(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_history(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.list_history( autoscale_pool_id="", @@ -902,7 +902,7 @@ async def test_path_params_list_history(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_list_members(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_members(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -910,7 +910,7 @@ async def test_method_list_members(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_members_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_members_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", page=1, @@ -920,7 +920,7 @@ async def test_method_list_members_with_all_params(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_raw_response_list_members(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_members(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) @@ -932,7 +932,7 @@ async def test_raw_response_list_members(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_members(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_members(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list_members( autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", ) as response: @@ -946,7 +946,7 @@ async def 
test_streaming_response_list_members(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_path_params_list_members(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_members(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): await async_client.gpu_droplets.autoscale.with_raw_response.list_members( autoscale_pool_id="", diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py index 334c701f..ecff25de 100644 --- a/tests/api_resources/gpu_droplets/test_backups.py +++ b/tests/api_resources/gpu_droplets/test_backups.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupRetrievePolicyResponse, @@ -24,7 +24,7 @@ class TestBackups: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list( droplet_id=3164444, ) @@ -32,7 +32,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list( droplet_id=3164444, page=1, @@ -42,7 +42,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list( droplet_id=3164444, ) @@ -54,7 +54,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list( droplet_id=3164444, ) as response: @@ -68,13 +68,13 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_policies(self, client: GradientAI) -> None: + def test_method_list_policies(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_policies() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_policies_with_all_params(self, client: GradientAI) -> None: + def test_method_list_policies_with_all_params(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_policies( page=1, per_page=1, @@ -83,7 +83,7 @@ def test_method_list_policies_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list_policies(self, client: GradientAI) -> None: + def test_raw_response_list_policies(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list_policies() assert response.is_closed is True @@ -93,7 +93,7 @@ def test_raw_response_list_policies(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_policies(self, client: 
GradientAI) -> None: + def test_streaming_response_list_policies(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list_policies() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -105,13 +105,13 @@ def test_streaming_response_list_policies(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_supported_policies(self, client: GradientAI) -> None: + def test_method_list_supported_policies(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_supported_policies() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_list_supported_policies(self, client: GradientAI) -> None: + def test_raw_response_list_supported_policies(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list_supported_policies() assert response.is_closed is True @@ -121,7 +121,7 @@ def test_raw_response_list_supported_policies(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_supported_policies(self, client: GradientAI) -> None: + def test_streaming_response_list_supported_policies(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -133,7 +133,7 @@ def test_streaming_response_list_supported_policies(self, client: GradientAI) -> @pytest.mark.skip() @parametrize - def test_method_retrieve_policy(self, client: GradientAI) -> None: + def test_method_retrieve_policy(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.retrieve_policy( 1, ) @@ -141,7 +141,7 @@ def test_method_retrieve_policy(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve_policy(self, client: GradientAI) -> None: + def test_raw_response_retrieve_policy(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.retrieve_policy( 1, ) @@ -153,7 +153,7 @@ def test_raw_response_retrieve_policy(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve_policy(self, client: GradientAI) -> None: + def test_streaming_response_retrieve_policy(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.retrieve_policy( 1, ) as response: @@ -173,7 +173,7 @@ class TestAsyncBackups: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list( droplet_id=3164444, ) @@ -181,7 +181,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list( droplet_id=3164444, page=1, @@ -191,7 +191,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: 
AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list( droplet_id=3164444, ) @@ -203,7 +203,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list( droplet_id=3164444, ) as response: @@ -217,13 +217,13 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_list_policies(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_policies(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_policies() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_policies_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_policies_with_all_params(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_policies( page=1, per_page=1, @@ -232,7 +232,7 @@ async def test_method_list_policies_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_raw_response_list_policies(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_policies(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list_policies() assert response.is_closed is True @@ -242,7 +242,7 @@ async def test_raw_response_list_policies(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_streaming_response_list_policies(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_policies(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list_policies() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -254,13 +254,13 @@ async def test_streaming_response_list_policies(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_method_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_supported_policies(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_supported_policies() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_supported_policies(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list_supported_policies() assert response.is_closed is True @@ -270,7 +270,7 @@ async def test_raw_response_list_supported_policies(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -282,7 +282,7 @@ async def test_streaming_response_list_supported_policies(self, async_client: As @pytest.mark.skip() @parametrize - async def test_method_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.retrieve_policy( 1, ) @@ -290,7 +290,7 @@ async def test_method_retrieve_policy(self, async_client: AsyncGradientAI) -> No @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy( 1, ) @@ -302,7 +302,7 @@ async def test_raw_response_retrieve_policy(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy( 1, ) as response: diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py index 2aef1fce..3715ced7 100644 --- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py +++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( DestroyWithAssociatedResourceListResponse, DestroyWithAssociatedResourceCheckStatusResponse, ) @@ -22,7 +22,7 @@ class TestDestroyWithAssociatedResources: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list( 1, ) @@ -32,7 +32,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( 1, ) @@ -46,7 +46,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( 1, ) as response: @@ -62,7 +62,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_check_status(self, client: GradientAI) -> None: + def test_method_check_status(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status( 1, ) @@ -72,7 +72,7 @@ def test_method_check_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_check_status(self, client: GradientAI) 
-> None: + def test_raw_response_check_status(self, client: Gradient) -> None: response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( 1, ) @@ -86,7 +86,7 @@ def test_raw_response_check_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_check_status(self, client: GradientAI) -> None: + def test_streaming_response_check_status(self, client: Gradient) -> None: with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( 1, ) as response: @@ -102,7 +102,7 @@ def test_streaming_response_check_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete_dangerous(self, client: GradientAI) -> None: + def test_method_delete_dangerous(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( droplet_id=3164444, x_dangerous=True, @@ -111,7 +111,7 @@ def test_method_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: + def test_raw_response_delete_dangerous(self, client: Gradient) -> None: response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( droplet_id=3164444, x_dangerous=True, @@ -124,7 +124,7 @@ def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: + def test_streaming_response_delete_dangerous(self, client: Gradient) -> None: with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( droplet_id=3164444, x_dangerous=True, @@ -139,7 +139,7 @@ def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete_selective(self, client: GradientAI) -> None: + def test_method_delete_selective(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( droplet_id=3164444, ) @@ -147,7 +147,7 @@ def test_method_delete_selective(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete_selective_with_all_params(self, client: GradientAI) -> None: + def test_method_delete_selective_with_all_params(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( droplet_id=3164444, floating_ips=["6186916"], @@ -160,7 +160,7 @@ def test_method_delete_selective_with_all_params(self, client: GradientAI) -> No @pytest.mark.skip() @parametrize - def test_raw_response_delete_selective(self, client: GradientAI) -> None: + def test_raw_response_delete_selective(self, client: Gradient) -> None: response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( droplet_id=3164444, ) @@ -172,7 +172,7 @@ def test_raw_response_delete_selective(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete_selective(self, client: GradientAI) -> None: + def test_streaming_response_delete_selective(self, client: Gradient) -> None: with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( droplet_id=3164444, ) as response: @@ -186,7 +186,7 @@ def 
test_streaming_response_delete_selective(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retry(self, client: GradientAI) -> None: + def test_method_retry(self, client: Gradient) -> None: destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry( 1, ) @@ -194,7 +194,7 @@ def test_method_retry(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retry(self, client: GradientAI) -> None: + def test_raw_response_retry(self, client: Gradient) -> None: response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( 1, ) @@ -206,7 +206,7 @@ def test_raw_response_retry(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retry(self, client: GradientAI) -> None: + def test_streaming_response_retry(self, client: Gradient) -> None: with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( 1, ) as response: @@ -226,7 +226,7 @@ class TestAsyncDestroyWithAssociatedResources: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list( 1, ) @@ -236,7 +236,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( 1, ) @@ -250,7 +250,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( 1, ) as response: @@ -266,7 +266,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_check_status(self, async_client: AsyncGradientAI) -> None: + async def test_method_check_status(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = ( await async_client.gpu_droplets.destroy_with_associated_resources.check_status( 1, @@ -278,7 +278,7 @@ async def test_method_check_status(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_check_status(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_check_status(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( 1, ) @@ -292,7 +292,7 @@ async def test_raw_response_check_status(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_check_status(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_check_status(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( 1, ) as response: @@ -308,7 +308,7 @@ async def 
test_streaming_response_check_status(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = ( await async_client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( droplet_id=3164444, @@ -319,7 +319,7 @@ async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( droplet_id=3164444, x_dangerous=True, @@ -332,7 +332,7 @@ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( droplet_id=3164444, x_dangerous=True, @@ -347,7 +347,7 @@ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_method_delete_selective(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete_selective(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = ( await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( droplet_id=3164444, @@ -357,7 +357,7 @@ async def test_method_delete_selective(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = ( await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( droplet_id=3164444, @@ -372,7 +372,7 @@ async def test_method_delete_selective_with_all_params(self, async_client: Async @pytest.mark.skip() @parametrize - async def test_raw_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete_selective(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( droplet_id=3164444, ) @@ -384,7 +384,7 @@ async def test_raw_response_delete_selective(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_streaming_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete_selective(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( droplet_id=3164444, ) as response: @@ -398,7 +398,7 @@ async def test_streaming_response_delete_selective(self, async_client: AsyncGrad @pytest.mark.skip() @parametrize - async def test_method_retry(self, async_client: AsyncGradientAI) -> None: + async def test_method_retry(self, async_client: AsyncGradient) -> None: destroy_with_associated_resource = await 
async_client.gpu_droplets.destroy_with_associated_resources.retry( 1, ) @@ -406,7 +406,7 @@ async def test_method_retry(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retry(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retry(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( 1, ) @@ -418,7 +418,7 @@ async def test_raw_response_retry(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_retry(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retry(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( 1, ) as response: diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py index 6d98ebe8..8585a114 100644 --- a/tests/api_resources/gpu_droplets/test_firewalls.py +++ b/tests/api_resources/gpu_droplets/test_firewalls.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( FirewallListResponse, FirewallCreateResponse, FirewallUpdateResponse, @@ -24,13 +24,13 @@ class TestFirewalls: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.create() assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.create( body={ "droplet_ids": [8043964], @@ -79,7 +79,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.with_raw_response.create() assert response.is_closed is True @@ -89,7 +89,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -101,7 +101,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -109,7 +109,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.with_raw_response.retrieve( 
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -121,7 +121,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.with_streaming_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -135,7 +135,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): client.gpu_droplets.firewalls.with_raw_response.retrieve( "", @@ -143,7 +143,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -152,7 +152,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={ @@ -202,7 +202,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.with_raw_response.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -215,7 +215,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.with_streaming_response.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -230,7 +230,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): client.gpu_droplets.firewalls.with_raw_response.update( firewall_id="", @@ -239,13 +239,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.list() assert_matches_type(FirewallListResponse, firewall, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.list( page=1, per_page=1, @@ -254,7 +254,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.with_raw_response.list() assert response.is_closed is True @@ -264,7 +264,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,7 +276,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: firewall = client.gpu_droplets.firewalls.delete( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -284,7 +284,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.with_raw_response.delete( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -296,7 +296,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.with_streaming_response.delete( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -310,7 +310,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): client.gpu_droplets.firewalls.with_raw_response.delete( "", @@ -324,13 +324,13 @@ class TestAsyncFirewalls: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.create() assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.create( body={ "droplet_ids": [8043964], @@ -379,7 +379,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.with_raw_response.create() assert response.is_closed is True @@ -389,7 +389,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: 
AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -401,7 +401,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -409,7 +409,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -421,7 +421,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -435,7 +435,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( "", @@ -443,7 +443,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -452,7 +452,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={ @@ -502,7 +502,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.with_raw_response.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -515,7 +515,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def 
test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.with_streaming_response.update( firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", firewall={"name": "frontend-firewall"}, @@ -530,7 +530,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): await async_client.gpu_droplets.firewalls.with_raw_response.update( firewall_id="", @@ -539,13 +539,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.list() assert_matches_type(FirewallListResponse, firewall, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.list( page=1, per_page=1, @@ -554,7 +554,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.with_raw_response.list() assert response.is_closed is True @@ -564,7 +564,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -576,7 +576,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: firewall = await async_client.gpu_droplets.firewalls.delete( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -584,7 +584,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.with_raw_response.delete( "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) @@ -596,7 +596,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.with_streaming_response.delete( 
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", ) as response: @@ -610,7 +610,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): await async_client.gpu_droplets.firewalls.with_raw_response.delete( "", diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py index 9b8b3183..9ac488d6 100644 --- a/tests/api_resources/gpu_droplets/test_floating_ips.py +++ b/tests/api_resources/gpu_droplets/test_floating_ips.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( FloatingIPListResponse, FloatingIPCreateResponse, FloatingIPRetrieveResponse, @@ -23,7 +23,7 @@ class TestFloatingIPs: @pytest.mark.skip() @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.create( droplet_id=2457247, ) @@ -31,7 +31,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.with_raw_response.create( droplet_id=2457247, ) @@ -43,7 +43,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.with_streaming_response.create( droplet_id=2457247, ) as response: @@ -57,7 +57,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: + def test_method_create_overload_2(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.create( region="nyc3", ) @@ -65,7 +65,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.create( region="nyc3", project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", @@ -74,7 +74,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.with_raw_response.create( region="nyc3", ) @@ -86,7 +86,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + def 
test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.with_streaming_response.create( region="nyc3", ) as response: @@ -100,7 +100,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.retrieve( "192.168.1.1", ) @@ -108,7 +108,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.with_raw_response.retrieve( "192.168.1.1", ) @@ -120,7 +120,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.with_streaming_response.retrieve( "192.168.1.1", ) as response: @@ -134,7 +134,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): client.gpu_droplets.floating_ips.with_raw_response.retrieve( "", @@ -142,13 +142,13 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.list() assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.list( page=1, per_page=1, @@ -157,7 +157,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.with_raw_response.list() assert response.is_closed is True @@ -167,7 +167,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -179,7 +179,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: floating_ip = client.gpu_droplets.floating_ips.delete( "192.168.1.1", ) @@ -187,7 +187,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def 
test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.with_raw_response.delete( "192.168.1.1", ) @@ -199,7 +199,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.with_streaming_response.delete( "192.168.1.1", ) as response: @@ -213,7 +213,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): client.gpu_droplets.floating_ips.with_raw_response.delete( "", @@ -227,7 +227,7 @@ class TestAsyncFloatingIPs: @pytest.mark.skip() @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.create( droplet_id=2457247, ) @@ -235,7 +235,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( droplet_id=2457247, ) @@ -247,7 +247,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( droplet_id=2457247, ) as response: @@ -261,7 +261,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.create( region="nyc3", ) @@ -269,7 +269,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.create( region="nyc3", project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", @@ -278,7 +278,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( region="nyc3", ) @@ -290,7 +290,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize 
- async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( region="nyc3", ) as response: @@ -304,7 +304,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.retrieve( "192.168.1.1", ) @@ -312,7 +312,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( "192.168.1.1", ) @@ -324,7 +324,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve( "192.168.1.1", ) as response: @@ -338,7 +338,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( "", @@ -346,13 +346,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.list() assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.list( page=1, per_page=1, @@ -361,7 +361,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.with_raw_response.list() assert response.is_closed is True @@ -371,7 +371,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.with_streaming_response.list() as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -383,7 +383,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: floating_ip = await async_client.gpu_droplets.floating_ips.delete( "192.168.1.1", ) @@ -391,7 +391,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete( "192.168.1.1", ) @@ -403,7 +403,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete( "192.168.1.1", ) as response: @@ -417,7 +417,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): await async_client.gpu_droplets.floating_ips.with_raw_response.delete( "", diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py index 5a2a7c0c..bf6bfa4f 100644 --- a/tests/api_resources/gpu_droplets/test_images.py +++ b/tests/api_resources/gpu_droplets/test_images.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( ImageListResponse, ImageCreateResponse, ImageUpdateResponse, @@ -24,13 +24,13 @@ class TestImages: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: image = client.gpu_droplets.images.create() assert_matches_type(ImageCreateResponse, image, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: image = client.gpu_droplets.images.create( description=" ", distribution="Ubuntu", @@ -43,7 +43,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.images.with_raw_response.create() assert response.is_closed is True @@ -53,7 +53,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.images.with_streaming_response.create() as 
response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -65,7 +65,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: image = client.gpu_droplets.images.retrieve( 0, ) @@ -73,7 +73,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.images.with_raw_response.retrieve( 0, ) @@ -85,7 +85,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.images.with_streaming_response.retrieve( 0, ) as response: @@ -99,7 +99,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: image = client.gpu_droplets.images.update( image_id=62137902, ) @@ -107,7 +107,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: image = client.gpu_droplets.images.update( image_id=62137902, description=" ", @@ -118,7 +118,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.gpu_droplets.images.with_raw_response.update( image_id=62137902, ) @@ -130,7 +130,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.gpu_droplets.images.with_streaming_response.update( image_id=62137902, ) as response: @@ -144,13 +144,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: image = client.gpu_droplets.images.list() assert_matches_type(ImageListResponse, image, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: image = client.gpu_droplets.images.list( page=1, per_page=1, @@ -162,7 +162,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.images.with_raw_response.list() assert response.is_closed is True @@ -172,7 +172,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def 
test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.images.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -184,7 +184,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: image = client.gpu_droplets.images.delete( 0, ) @@ -192,7 +192,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.images.with_raw_response.delete( 0, ) @@ -204,7 +204,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.images.with_streaming_response.delete( 0, ) as response: @@ -224,13 +224,13 @@ class TestAsyncImages: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.create() assert_matches_type(ImageCreateResponse, image, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.create( description=" ", distribution="Ubuntu", @@ -243,7 +243,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.with_raw_response.create() assert response.is_closed is True @@ -253,7 +253,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -265,7 +265,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.retrieve( 0, ) @@ -273,7 +273,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.with_raw_response.retrieve( 0, ) @@ -285,7 +285,7 @@ async def test_raw_response_retrieve(self, 
async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.with_streaming_response.retrieve( 0, ) as response: @@ -299,7 +299,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.update( image_id=62137902, ) @@ -307,7 +307,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.update( image_id=62137902, description=" ", @@ -318,7 +318,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.with_raw_response.update( image_id=62137902, ) @@ -330,7 +330,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.with_streaming_response.update( image_id=62137902, ) as response: @@ -344,13 +344,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.list() assert_matches_type(ImageListResponse, image, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.list( page=1, per_page=1, @@ -362,7 +362,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.with_raw_response.list() assert response.is_closed is True @@ -372,7 +372,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -384,7 +384,7 @@ async def 
test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: image = await async_client.gpu_droplets.images.delete( 0, ) @@ -392,7 +392,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.with_raw_response.delete( 0, ) @@ -404,7 +404,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.with_streaming_response.delete( 0, ) as response: diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py index b96c6d52..f660f8f3 100644 --- a/tests/api_resources/gpu_droplets/test_load_balancers.py +++ b/tests/api_resources/gpu_droplets/test_load_balancers.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import ( +from gradient.types.gpu_droplets import ( LoadBalancerListResponse, LoadBalancerCreateResponse, LoadBalancerUpdateResponse, @@ -24,7 +24,7 @@ class TestLoadBalancers: @pytest.mark.skip() @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -39,7 +39,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -110,7 +110,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.create( forwarding_rules=[ { @@ -129,7 +129,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.create( forwarding_rules=[ { @@ -150,7 +150,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: + def test_method_create_overload_2(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -165,7 +165,7 @@ def 
test_method_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -236,7 +236,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.create( forwarding_rules=[ { @@ -255,7 +255,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.create( forwarding_rules=[ { @@ -276,7 +276,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.retrieve( "lb_id", ) @@ -284,7 +284,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.retrieve( "lb_id", ) @@ -296,7 +296,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.retrieve( "lb_id", ) as response: @@ -310,7 +310,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): client.gpu_droplets.load_balancers.with_raw_response.retrieve( "", @@ -318,7 +318,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_overload_1(self, client: GradientAI) -> None: + def test_method_update_overload_1(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -334,7 +334,7 @@ def test_method_update_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params_overload_1(self, client: GradientAI) -> None: + def test_method_update_with_all_params_overload_1(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -406,7 +406,7 @@ def test_method_update_with_all_params_overload_1(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_update_overload_1(self, client: 
GradientAI) -> None: + def test_raw_response_update_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -426,7 +426,7 @@ def test_raw_response_update_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update_overload_1(self, client: GradientAI) -> None: + def test_streaming_response_update_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -448,7 +448,7 @@ def test_streaming_response_update_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_overload_1(self, client: GradientAI) -> None: + def test_path_params_update_overload_1(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="", @@ -464,7 +464,7 @@ def test_path_params_update_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_overload_2(self, client: GradientAI) -> None: + def test_method_update_overload_2(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -480,7 +480,7 @@ def test_method_update_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params_overload_2(self, client: GradientAI) -> None: + def test_method_update_with_all_params_overload_2(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -552,7 +552,7 @@ def test_method_update_with_all_params_overload_2(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_update_overload_2(self, client: GradientAI) -> None: + def test_raw_response_update_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -572,7 +572,7 @@ def test_raw_response_update_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update_overload_2(self, client: GradientAI) -> None: + def test_streaming_response_update_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -594,7 +594,7 @@ def test_streaming_response_update_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_overload_2(self, client: GradientAI) -> None: + def test_path_params_update_overload_2(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="", @@ -610,13 +610,13 @@ def test_path_params_update_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.list() 
assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.list( page=1, per_page=1, @@ -625,7 +625,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.list() assert response.is_closed is True @@ -635,7 +635,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -647,7 +647,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.delete( "lb_id", ) @@ -655,7 +655,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.delete( "lb_id", ) @@ -667,7 +667,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.with_streaming_response.delete( "lb_id", ) as response: @@ -681,7 +681,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): client.gpu_droplets.load_balancers.with_raw_response.delete( "", @@ -689,7 +689,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete_cache(self, client: GradientAI) -> None: + def test_method_delete_cache(self, client: Gradient) -> None: load_balancer = client.gpu_droplets.load_balancers.delete_cache( "lb_id", ) @@ -697,7 +697,7 @@ def test_method_delete_cache(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete_cache(self, client: GradientAI) -> None: + def test_raw_response_delete_cache(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache( "lb_id", ) @@ -709,7 +709,7 @@ def test_raw_response_delete_cache(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete_cache(self, client: GradientAI) -> None: + def test_streaming_response_delete_cache(self, client: Gradient) -> None: with 
client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( "lb_id", ) as response: @@ -723,7 +723,7 @@ def test_streaming_response_delete_cache(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete_cache(self, client: GradientAI) -> None: + def test_path_params_delete_cache(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): client.gpu_droplets.load_balancers.with_raw_response.delete_cache( "", @@ -737,7 +737,7 @@ class TestAsyncLoadBalancers: @pytest.mark.skip() @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -752,7 +752,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -823,7 +823,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( forwarding_rules=[ { @@ -842,7 +842,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( forwarding_rules=[ { @@ -863,7 +863,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -878,7 +878,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( forwarding_rules=[ { @@ -949,7 +949,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( forwarding_rules=[ { @@ -968,7 +968,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA @pytest.mark.skip() 
@parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( forwarding_rules=[ { @@ -989,7 +989,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.retrieve( "lb_id", ) @@ -997,7 +997,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( "lb_id", ) @@ -1009,7 +1009,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve( "lb_id", ) as response: @@ -1023,7 +1023,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( "", @@ -1031,7 +1031,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1047,7 +1047,7 @@ async def test_method_update_overload_1(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1119,7 +1119,7 @@ async def test_method_update_with_all_params_overload_1(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1139,7 +1139,7 @@ async def test_raw_response_update_overload_1(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def 
test_streaming_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1161,7 +1161,7 @@ async def test_streaming_response_update_overload_1(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_path_params_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_overload_1(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): await async_client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="", @@ -1177,7 +1177,7 @@ async def test_path_params_update_overload_1(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_method_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1193,7 +1193,7 @@ async def test_method_update_overload_2(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1265,7 +1265,7 @@ async def test_method_update_with_all_params_overload_2(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1285,7 +1285,7 @@ async def test_raw_response_update_overload_2(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", forwarding_rules=[ @@ -1307,7 +1307,7 @@ async def test_streaming_response_update_overload_2(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_path_params_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_overload_2(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): await async_client.gpu_droplets.load_balancers.with_raw_response.update( lb_id="", @@ -1323,13 +1323,13 @@ async def test_path_params_update_overload_2(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: load_balancer = await 
async_client.gpu_droplets.load_balancers.list() assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.list( page=1, per_page=1, @@ -1338,7 +1338,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.list() assert response.is_closed is True @@ -1348,7 +1348,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1360,7 +1360,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.delete( "lb_id", ) @@ -1368,7 +1368,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete( "lb_id", ) @@ -1380,7 +1380,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete( "lb_id", ) as response: @@ -1394,7 +1394,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): await async_client.gpu_droplets.load_balancers.with_raw_response.delete( "", @@ -1402,7 +1402,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_delete_cache(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete_cache(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache( "lb_id", ) @@ -1410,7 +1410,7 @@ async def test_method_delete_cache(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def 
test_raw_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( "lb_id", ) @@ -1422,7 +1422,7 @@ async def test_raw_response_delete_cache(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete_cache(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( "lb_id", ) as response: @@ -1436,7 +1436,7 @@ async def test_streaming_response_delete_cache(self, async_client: AsyncGradient @pytest.mark.skip() @parametrize - async def test_path_params_delete_cache(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete_cache(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( "", diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py index 1ff11cd7..ec934e9f 100644 --- a/tests/api_resources/gpu_droplets/test_sizes.py +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import SizeListResponse +from gradient.types.gpu_droplets import SizeListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,13 +19,13 @@ class TestSizes: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: size = client.gpu_droplets.sizes.list() assert_matches_type(SizeListResponse, size, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: size = client.gpu_droplets.sizes.list( page=1, per_page=1, @@ -34,7 +34,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.sizes.with_raw_response.list() assert response.is_closed is True @@ -44,7 +44,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.sizes.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,13 +62,13 @@ class TestAsyncSizes: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: size = await async_client.gpu_droplets.sizes.list() assert_matches_type(SizeListResponse, size, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: 
AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: size = await async_client.gpu_droplets.sizes.list( page=1, per_page=1, @@ -77,7 +77,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.sizes.with_raw_response.list() assert response.is_closed is True @@ -87,7 +87,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.sizes.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py index 413dd993..d4574ece 100644 --- a/tests/api_resources/gpu_droplets/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse +from gradient.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestSnapshots: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.retrieve( 6372321, ) @@ -27,7 +27,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.snapshots.with_raw_response.retrieve( 6372321, ) @@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.retrieve( 6372321, ) as response: @@ -53,13 +53,13 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.list() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.list( page=1, per_page=1, @@ -69,7 +69,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> 
None: response = client.gpu_droplets.snapshots.with_raw_response.list() assert response.is_closed is True @@ -79,7 +79,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -91,7 +91,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.delete( 6372321, ) @@ -99,7 +99,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.snapshots.with_raw_response.delete( 6372321, ) @@ -111,7 +111,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.delete( 6372321, ) as response: @@ -131,7 +131,7 @@ class TestAsyncSnapshots: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.retrieve( 6372321, ) @@ -139,7 +139,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.snapshots.with_raw_response.retrieve( 6372321, ) @@ -151,7 +151,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.snapshots.with_streaming_response.retrieve( 6372321, ) as response: @@ -165,13 +165,13 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.list() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.list( page=1, per_page=1, @@ -181,7 +181,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def 
test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.snapshots.with_raw_response.list()

         assert response.is_closed is True
@@ -191,7 +191,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.snapshots.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -203,7 +203,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.snapshots.delete(
             6372321,
         )
@@ -211,7 +211,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.snapshots.with_raw_response.delete(
             6372321,
         )
@@ -223,7 +223,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.snapshots.with_streaming_response.delete(
             6372321,
         ) as response:
diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py
index baf6b430..49436220 100644
--- a/tests/api_resources/gpu_droplets/test_volumes.py
+++ b/tests/api_resources/gpu_droplets/test_volumes.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets import (
+from gradient.types.gpu_droplets import (
     VolumeListResponse,
     VolumeCreateResponse,
     VolumeRetrieveResponse,
@@ -23,7 +23,7 @@ class TestVolumes:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_overload_1(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -33,7 +33,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -48,7 +48,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.create(
             name="example",
             region="nyc3",
@@ -62,7 +62,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.create(
             name="example",
             region="nyc3",
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_overload_2(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -88,7 +88,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -103,7 +103,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.create(
             name="example",
             region="nyc3",
@@ -117,7 +117,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.create(
             name="example",
             region="nyc3",
@@ -133,7 +133,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -141,7 +141,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -153,7 +153,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         ) as response:
@@ -167,7 +167,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.with_raw_response.retrieve(
                 "",
@@ -175,13 +175,13 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.list()
         assert_matches_type(VolumeListResponse, volume, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.list(
             name="name",
             page=1,
@@ -192,7 +192,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.list()

         assert response.is_closed is True
@@ -202,7 +202,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -214,7 +214,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -222,7 +222,7 @@ def test_method_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -234,7 +234,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         ) as response:
@@ -248,7 +248,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.with_raw_response.delete(
                 "",
@@ -256,13 +256,13 @@ def test_path_params_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete_by_name(self, client: GradientAI) -> None:
+    def test_method_delete_by_name(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.delete_by_name()
         assert volume is None

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete_by_name_with_all_params(self, client: GradientAI) -> None:
+    def test_method_delete_by_name_with_all_params(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.delete_by_name(
             name="name",
             region="nyc3",
@@ -271,7 +271,7 @@ def test_method_delete_by_name_with_all_params(self, client: GradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete_by_name(self, client: GradientAI) -> None:
+    def test_raw_response_delete_by_name(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.delete_by_name()

         assert response.is_closed is True
@@ -281,7 +281,7 @@ def test_raw_response_delete_by_name(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete_by_name(self, client: GradientAI) -> None:
+    def test_streaming_response_delete_by_name(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -299,7 +299,7 @@ class TestAsyncVolumes:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -309,7 +309,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -324,7 +324,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.create(
             name="example",
             region="nyc3",
@@ -338,7 +338,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.create(
             name="example",
             region="nyc3",
@@ -354,7 +354,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -364,7 +364,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.create(
             name="example",
             region="nyc3",
@@ -379,7 +379,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.create(
             name="example",
             region="nyc3",
@@ -393,7 +393,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.create(
             name="example",
             region="nyc3",
@@ -409,7 +409,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -417,7 +417,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -429,7 +429,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         ) as response:
@@ -443,7 +443,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.with_raw_response.retrieve(
                 "",
@@ -451,13 +451,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.list()
         assert_matches_type(VolumeListResponse, volume, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.list(
             name="name",
             page=1,
@@ -468,7 +468,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.list()

         assert response.is_closed is True
@@ -478,7 +478,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -490,7 +490,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -498,7 +498,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         )
@@ -510,7 +510,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.delete(
             "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
         ) as response:
@@ -524,7 +524,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.with_raw_response.delete(
                 "",
@@ -532,13 +532,13 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete_by_name(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete_by_name(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.delete_by_name()
         assert volume is None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.delete_by_name(
             name="name",
             region="nyc3",
@@ -547,7 +547,7 @@ async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGr

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete_by_name(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete_by_name(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.delete_by_name()

         assert response.is_closed is True
@@ -557,7 +557,7 @@ async def test_raw_response_delete_by_name(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete_by_name(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete_by_name(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py
index 40d9b4eb..19088e9e 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_actions.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradient.types.gpu_droplets.volumes import (
     ActionListResponse,
     ActionRetrieveResponse,
     ActionInitiateByIDResponse,
@@ -24,7 +24,7 @@ class TestActions:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -33,7 +33,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:
+    def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -44,7 +44,7 @@ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -57,7 +57,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -72,7 +72,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
                 action_id=36804636,
@@ -81,7 +81,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -89,7 +89,7 @@ def test_method_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             page=1,
@@ -99,7 +99,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -111,7 +111,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
@@ -125,7 +125,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_list(self, client: GradientAI) -> None:
+    def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.actions.with_raw_response.list(
                 volume_id="",
@@ -133,7 +133,7 @@ def test_path_params_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_overload_1(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -143,7 +143,7 @@ def test_method_initiate_by_id_overload_1(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_with_all_params_overload_1(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_with_all_params_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -157,7 +157,7 @@ def test_method_initiate_by_id_with_all_params_overload_1(self, client: Gradient

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_by_id_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_by_id_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -171,7 +171,7 @@ def test_raw_response_initiate_by_id_overload_1(self, client: GradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_by_id_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_by_id_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -187,7 +187,7 @@ def test_streaming_response_initiate_by_id_overload_1(self, client: GradientAI)

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_initiate_by_id_overload_1(self, client: GradientAI) -> None:
+    def test_path_params_initiate_by_id_overload_1(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -197,7 +197,7 @@ def test_path_params_initiate_by_id_overload_1(self, client: GradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -207,7 +207,7 @@ def test_method_initiate_by_id_overload_2(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_with_all_params_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -220,7 +220,7 @@ def test_method_initiate_by_id_with_all_params_overload_2(self, client: Gradient

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_by_id_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_by_id_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -234,7 +234,7 @@ def test_raw_response_initiate_by_id_overload_2(self, client: GradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_by_id_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_by_id_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -250,7 +250,7 @@ def test_streaming_response_initiate_by_id_overload_2(self, client: GradientAI)

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_initiate_by_id_overload_2(self, client: GradientAI) -> None:
+    def test_path_params_initiate_by_id_overload_2(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -260,7 +260,7 @@ def test_path_params_initiate_by_id_overload_2(self, client: GradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_overload_3(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_overload_3(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -270,7 +270,7 @@ def test_method_initiate_by_id_overload_3(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_id_with_all_params_overload_3(self, client: GradientAI) -> None:
+    def test_method_initiate_by_id_with_all_params_overload_3(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -283,7 +283,7 @@ def test_method_initiate_by_id_with_all_params_overload_3(self, client: Gradient

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_by_id_overload_3(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_by_id_overload_3(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -297,7 +297,7 @@ def test_raw_response_initiate_by_id_overload_3(self, client: GradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_by_id_overload_3(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_by_id_overload_3(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -313,7 +313,7 @@ def test_streaming_response_initiate_by_id_overload_3(self, client: GradientAI)

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_initiate_by_id_overload_3(self, client: GradientAI) -> None:
+    def test_path_params_initiate_by_id_overload_3(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -323,7 +323,7 @@ def test_path_params_initiate_by_id_overload_3(self, client: GradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_name_overload_1(self, client: GradientAI) -> None:
+    def test_method_initiate_by_name_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -332,7 +332,7 @@ def test_method_initiate_by_name_overload_1(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_name_with_all_params_overload_1(self, client: GradientAI) -> None:
+    def test_method_initiate_by_name_with_all_params_overload_1(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -345,7 +345,7 @@ def test_method_initiate_by_name_with_all_params_overload_1(self, client: Gradie

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_by_name_overload_1(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_by_name_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -358,7 +358,7 @@ def test_raw_response_initiate_by_name_overload_1(self, client: GradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_by_name_overload_1(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_by_name_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -373,7 +373,7 @@ def test_streaming_response_initiate_by_name_overload_1(self, client: GradientAI

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_name_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_by_name_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -382,7 +382,7 @@ def test_method_initiate_by_name_overload_2(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_initiate_by_name_with_all_params_overload_2(self, client: GradientAI) -> None:
+    def test_method_initiate_by_name_with_all_params_overload_2(self, client: Gradient) -> None:
         action = client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -394,7 +394,7 @@ def test_method_initiate_by_name_with_all_params_overload_2(self, client: Gradie

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_initiate_by_name_overload_2(self, client: GradientAI) -> None:
+    def test_raw_response_initiate_by_name_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -407,7 +407,7 @@ def test_raw_response_initiate_by_name_overload_2(self, client: GradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_initiate_by_name_overload_2(self, client: GradientAI) -> None:
+    def test_streaming_response_initiate_by_name_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -428,7 +428,7 @@ class TestAsyncActions:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -437,7 +437,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -448,7 +448,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -461,7 +461,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.retrieve(
             action_id=36804636,
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
@@ -476,7 +476,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve(
                 action_id=36804636,
@@ -485,7 +485,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -493,7 +493,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             page=1,
@@ -503,7 +503,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -515,7 +515,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
@@ -529,7 +529,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_list(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.actions.with_raw_response.list(
                 volume_id="",
@@ -537,7 +537,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -547,7 +547,7 @@ async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradien

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -561,7 +561,7 @@ async def test_method_initiate_by_id_with_all_params_overload_1(self, async_clie

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -575,7 +575,7 @@ async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncG

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -591,7 +591,7 @@ async def test_streaming_response_initiate_by_id_overload_1(self, async_client:

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -601,7 +601,7 @@ async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGr

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -611,7 +611,7 @@ async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradien

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -624,7 +624,7 @@ async def test_method_initiate_by_id_with_all_params_overload_2(self, async_clie

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -638,7 +638,7 @@ async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncG

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             droplet_id=11612190,
@@ -654,7 +654,7 @@ async def test_streaming_response_initiate_by_id_overload_2(self, async_client:

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -664,7 +664,7 @@ async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGr

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -674,7 +674,7 @@ async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradien

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -687,7 +687,7 @@ async def test_method_initiate_by_id_with_all_params_overload_3(self, async_clie

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -701,7 +701,7 @@ async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncG

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             size_gigabytes=16384,
@@ -717,7 +717,7 @@ async def test_streaming_response_initiate_by_id_overload_3(self, async_client:

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id(
                 volume_id="",
@@ -727,7 +727,7 @@ async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGr

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -736,7 +736,7 @@ async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradi

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -749,7 +749,7 @@ async def test_method_initiate_by_name_with_all_params_overload_1(self, async_cl

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -762,7 +762,7 @@ async def test_raw_response_initiate_by_name_overload_1(self, async_client: Asyn

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -777,7 +777,7 @@ async def test_streaming_response_initiate_by_name_overload_1(self, async_client

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -786,7 +786,7 @@ async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradi

     @pytest.mark.skip()
     @parametrize
-    async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.volumes.actions.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -798,7 +798,7 @@ async def test_method_initiate_by_name_with_all_params_overload_2(self, async_cl

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
@@ -811,7 +811,7 @@ async def test_raw_response_initiate_by_name_overload_2(self, async_client: Asyn

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name(
             droplet_id=11612190,
             type="attach",
diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
index 4884d372..5037c7bb 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.gpu_droplets.volumes import (
+from gradient.types.gpu_droplets.volumes import (
     SnapshotListResponse,
     SnapshotCreateResponse,
     SnapshotRetrieveResponse,
@@ -23,7 +23,7 @@ class TestSnapshots:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: GradientAI) -> None:
+    def test_method_create(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -32,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: GradientAI) -> None:
+    def test_raw_response_create(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -55,7 +55,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: GradientAI) -> None:
+    def test_streaming_response_create(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -70,7 +70,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_create(self, client: GradientAI) -> None:
+    def test_path_params_create(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.snapshots.with_raw_response.create(
                 volume_id="",
@@ -79,7 +79,7 @@ def test_path_params_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.retrieve(
             "snapshot_id",
         )
@@ -87,7 +87,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
             "snapshot_id",
         )
@@ -99,7 +99,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
             "snapshot_id",
         ) as response:
@@ -113,7 +113,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
             client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
                 "",
@@ -121,7 +121,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -129,7 +129,7 @@ def test_method_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             page=1,
@@ -139,7 +139,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -151,7 +151,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
@@ -165,7 +165,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_list(self, client: GradientAI) -> None:
+    def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             client.gpu_droplets.volumes.snapshots.with_raw_response.list(
                 volume_id="",
@@ -173,7 +173,7 @@ def test_path_params_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.delete(
             "snapshot_id",
         )
@@ -181,7 +181,7 @@ def test_method_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
             "snapshot_id",
         )
@@ -193,7 +193,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
             "snapshot_id",
         ) as response:
@@ -207,7 +207,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
             client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
                 "",
@@ -221,7 +221,7 @@ class TestAsyncSnapshots:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -230,7 +230,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -240,7 +240,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -253,7 +253,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.create(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             name="big-data-snapshot1475261774",
@@ -268,7 +268,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_create(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create(
                 volume_id="",
@@ -277,7 +277,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve(
             "snapshot_id",
         )
@@ -285,7 +285,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
             "snapshot_id",
         )
@@ -297,7 +297,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
             "snapshot_id",
         ) as response:
@@ -311,7 +311,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
             await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
                 "",
@@ -319,7 +319,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -327,7 +327,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
             page=1,
@@ -337,7 +337,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         )
@@ -349,7 +349,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.list(
             volume_id="7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
@@ -363,7 +363,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_list(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"):
             await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list(
                 volume_id="",
@@ -371,7 +371,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.delete(
             "snapshot_id",
         )
@@ -379,7 +379,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
             "snapshot_id",
         )
@@ -391,7 +391,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
             "snapshot_id",
         ) as response:
@@ -405,7 +405,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->

     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"):
             await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
                 "",
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
index 85ad49da..f22947ed 100644
--- a/tests/api_resources/inference/test_api_keys.py
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -7,9 +7,9 @@

 import pytest

+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.inference import (
+from gradient.types.inference import (
     APIKeyListResponse,
     APIKeyCreateResponse,
     APIKeyDeleteResponse,
@@ -25,13 +25,13 @@ class TestAPIKeys:

     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: GradientAI) -> None:
+    def test_method_create(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.create()
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.create(
             name="Production Key",
         )
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: GradientAI) -> None:
+    def test_raw_response_create(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.create()

         assert response.is_closed is True
@@ -49,7 +49,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: GradientAI) -> None:
+    def test_streaming_response_create(self, client: Gradient) -> None:
         with client.inference.api_keys.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: GradientAI) -> None:
+    def test_method_update(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.update(
             path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -69,7 +69,7 @@ def test_method_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+    def test_method_update_with_all_params(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.update(
             path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
@@ -79,7 +79,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: GradientAI) -> None:
+    def test_raw_response_update(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.update(
             path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -91,7 +91,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: GradientAI) -> None:
+    def test_streaming_response_update(self, client: Gradient) -> None:
         with client.inference.api_keys.with_streaming_response.update(
             path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -105,7 +105,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: GradientAI) -> None:
+    def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
             client.inference.api_keys.with_raw_response.update(
                 path_api_key_uuid="",
@@ -113,13 +113,13 @@ def test_path_params_update(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.list()
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.list(
             page=0,
             per_page=0,
@@ -128,7 +128,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.list()

         assert response.is_closed is True
@@ -138,7 +138,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.inference.api_keys.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -150,7 +150,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.delete(
             "api_key_uuid",
         )
@@ -158,7 +158,7 @@ def test_method_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.delete(
             "api_key_uuid",
         )
@@ -170,7 +170,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.inference.api_keys.with_streaming_response.delete(
             "api_key_uuid",
         ) as response:
@@ -184,7 +184,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
             client.inference.api_keys.with_raw_response.delete(
                 "",
@@ -192,7 +192,7 @@ def test_path_params_delete(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_method_update_regenerate(self, client: GradientAI) -> None:
+    def test_method_update_regenerate(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.update_regenerate(
             "api_key_uuid",
         )
@@ -200,7 +200,7 @@ def test_method_update_regenerate(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update_regenerate(self, client: GradientAI) -> None:
+    def test_raw_response_update_regenerate(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.update_regenerate(
             "api_key_uuid",
         )
@@ -212,7 +212,7 @@ def test_raw_response_update_regenerate(self, client: GradientAI) -> None:

     @pytest.mark.skip()
     @parametrize
-    def
test_streaming_response_update_regenerate(self, client: GradientAI) -> None: + def test_streaming_response_update_regenerate(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: @@ -226,7 +226,7 @@ def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_regenerate(self, client: GradientAI) -> None: + def test_path_params_update_regenerate(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.inference.api_keys.with_raw_response.update_regenerate( "", @@ -240,13 +240,13 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.create( name="Production Key", ) @@ -254,7 +254,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.create() assert response.is_closed is True @@ -264,7 +264,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,7 +276,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -284,7 +284,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', @@ -294,7 +294,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.update( 
path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -306,7 +306,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -320,7 +320,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.inference.api_keys.with_raw_response.update( path_api_key_uuid="", @@ -328,13 +328,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.list( page=0, per_page=0, @@ -343,7 +343,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.list() assert response.is_closed is True @@ -353,7 +353,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -365,7 +365,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.delete( "api_key_uuid", ) @@ -373,7 +373,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -385,7 +385,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: 
AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -399,7 +399,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.inference.api_keys.with_raw_response.delete( "", @@ -407,7 +407,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_regenerate(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update_regenerate( "api_key_uuid", ) @@ -415,7 +415,7 @@ async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -427,7 +427,7 @@ async def test_raw_response_update_regenerate(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_regenerate(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: @@ -441,7 +441,7 @@ async def test_streaming_response_update_regenerate(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_regenerate(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.inference.api_keys.with_raw_response.update_regenerate( "", diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index ebb0841a..a5734cea 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradient.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, @@ -23,7 +23,7 @@ class TestDataSources: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -31,7 +31,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def 
test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ @@ -57,7 +57,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -69,7 +69,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.knowledge_bases.data_sources.with_streaming_response.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -83,7 +83,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_create(self, client: GradientAI) -> None: + def test_path_params_create(self, client: Gradient) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" ): @@ -93,7 +93,7 @@ def test_path_params_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -101,7 +101,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -111,7 +111,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -123,7 +123,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.knowledge_bases.data_sources.with_streaming_response.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -137,7 +137,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list(self, client: GradientAI) -> None: + def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid="", @@ -145,7 +145,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def 
test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -154,7 +154,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -167,7 +167,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.knowledge_bases.data_sources.with_streaming_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -182,7 +182,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -203,7 +203,7 @@ class TestAsyncDataSources: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -211,7 +211,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ @@ -237,7 +237,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -249,7 +249,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -263,7 +263,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() 
@parametrize - async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_create(self, async_client: AsyncGradient) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" ): @@ -273,7 +273,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -281,7 +281,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -291,7 +291,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -303,7 +303,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.list( knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -317,7 +317,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid="", @@ -325,7 +325,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -334,7 +334,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -347,7 +347,7 @@ async def 
test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', @@ -362,7 +362,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index b0185941..231b22af 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradient.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -25,13 +25,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create( data_source_uuids=["example string"], knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) @@ -70,7 +70,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -82,7 +82,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: @@ -96,7 +96,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", @@ -104,13 +104,13 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, @@ -119,7 +119,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True @@ -129,7 +129,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -141,7 +141,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve_data_sources(self, client: GradientAI) -> None: + def test_method_retrieve_data_sources(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) @@ -149,7 +149,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: + def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -161,7 +161,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: + def test_streaming_response_retrieve_data_sources(self, client: Gradient) -> None: with 
client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: @@ -175,7 +175,7 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: + def test_path_params_retrieve_data_sources(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", @@ -183,7 +183,7 @@ def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_cancel(self, client: GradientAI) -> None: + def test_method_update_cancel(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -191,7 +191,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: + def test_method_update_cancel_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -200,7 +200,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update_cancel(self, client: GradientAI) -> None: + def test_raw_response_update_cancel(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -212,7 +212,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update_cancel(self, client: GradientAI) -> None: + def test_streaming_response_update_cancel(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -226,7 +226,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_cancel(self, client: GradientAI) -> None: + def test_path_params_update_cancel(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", @@ -240,13 +240,13 @@ class TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create( data_source_uuids=["example string"], knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', @@ -255,7 +255,7 @@ async def 
test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True @@ -265,7 +265,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -277,7 +277,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) @@ -285,7 +285,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -297,7 +297,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: @@ -311,7 +311,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", @@ -319,13 +319,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, @@ -334,7 +334,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def 
test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True @@ -344,7 +344,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -356,7 +356,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) @@ -364,7 +364,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -376,7 +376,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: @@ -390,7 +390,7 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", @@ -398,7 +398,7 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie @pytest.mark.skip() @parametrize - async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_cancel(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -406,7 +406,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -415,7 +415,7 @@ async def 
test_method_update_cancel_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_cancel(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -427,7 +427,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_cancel(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -441,7 +441,7 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_cancel(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index 6b3d99a3..5bb7a1e9 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.models.providers import ( +from gradient.types.models.providers import ( AnthropicListResponse, AnthropicCreateResponse, AnthropicDeleteResponse, @@ -26,13 +26,13 @@ class TestAnthropic: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.create() assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.create( api_key='"sk-ant-12345678901234567890123456789012"', name='"Production Key"', @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.create() assert response.is_closed is True @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, 
client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.retrieve( "api_key_uuid", ) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.retrieve( "api_key_uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -97,7 +97,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.models.providers.anthropic.with_raw_response.retrieve( "", @@ -105,7 +105,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -113,7 +113,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -124,7 +124,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -136,7 +136,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -150,7 +150,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="", @@ -158,13 +158,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, 
client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list( page=0, per_page=0, @@ -173,7 +173,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.list() assert response.is_closed is True @@ -183,7 +183,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,7 +195,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.delete( "api_key_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.delete( "api_key_uuid", ) @@ -215,7 +215,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -229,7 +229,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.models.providers.anthropic.with_raw_response.delete( "", @@ -237,7 +237,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents(self, client: GradientAI) -> None: + def test_method_list_agents(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -245,7 +245,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + def test_method_list_agents_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -255,7 +255,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - 
def test_raw_response_list_agents(self, client: GradientAI) -> None: + def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -267,7 +267,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list_agents(self, client: GradientAI) -> None: + def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -281,7 +281,7 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list_agents(self, client: GradientAI) -> None: + def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.models.providers.anthropic.with_raw_response.list_agents( uuid="", @@ -295,13 +295,13 @@ class TestAsyncAnthropic: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.create() assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.create( api_key='"sk-ant-12345678901234567890123456789012"', name='"Production Key"', @@ -310,7 +310,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.create() assert response.is_closed is True @@ -320,7 +320,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -332,7 +332,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.retrieve( "api_key_uuid", ) @@ -340,7 +340,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.retrieve( "api_key_uuid", ) @@ 
-352,7 +352,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -366,7 +366,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.models.providers.anthropic.with_raw_response.retrieve( "", @@ -374,7 +374,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -382,7 +382,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -393,7 +393,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -405,7 +405,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -419,7 +419,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.models.providers.anthropic.with_raw_response.update( path_api_key_uuid="", @@ -427,13 +427,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: anthropic = await 
async_client.models.providers.anthropic.list() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.list( page=0, per_page=0, @@ -442,7 +442,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list() assert response.is_closed is True @@ -452,7 +452,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -464,7 +464,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.delete( "api_key_uuid", ) @@ -472,7 +472,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.delete( "api_key_uuid", ) @@ -484,7 +484,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -498,7 +498,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.models.providers.anthropic.with_raw_response.delete( "", @@ -506,7 +506,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -514,7 +514,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def 
test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -524,7 +524,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize - async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -536,7 +536,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -550,7 +550,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize - async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.models.providers.anthropic.with_raw_response.list_agents( uuid="", diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index bdde97ca..ed2cfc8e 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.models.providers import ( +from gradient.types.models.providers import ( OpenAIListResponse, OpenAICreateResponse, OpenAIDeleteResponse, @@ -26,13 +26,13 @@ class TestOpenAI: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: openai = client.models.providers.openai.create() assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.create( api_key='"sk-proj--123456789098765432123456789"', name='"Production Key"', @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.create() assert response.is_closed is True @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with 
client.models.providers.openai.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve( "api_key_uuid", ) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.retrieve( "api_key_uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -97,7 +97,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.models.providers.openai.with_raw_response.retrieve( "", @@ -105,7 +105,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: openai = client.models.providers.openai.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -113,7 +113,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -124,7 +124,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -136,7 +136,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -150,7 +150,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): 
client.models.providers.openai.with_raw_response.update( path_api_key_uuid="", @@ -158,13 +158,13 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: openai = client.models.providers.openai.list() assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.list( page=0, per_page=0, @@ -173,7 +173,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.list() assert response.is_closed is True @@ -183,7 +183,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,7 +195,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: openai = client.models.providers.openai.delete( "api_key_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.delete( "api_key_uuid", ) @@ -215,7 +215,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -229,7 +229,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.models.providers.openai.with_raw_response.delete( "", @@ -237,7 +237,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve_agents(self, client: GradientAI) -> None: + def test_method_retrieve_agents(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -245,7 +245,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: + def test_method_retrieve_agents_with_all_params(self, client: Gradient) -> None: openai = 
client.models.providers.openai.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -255,7 +255,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non @pytest.mark.skip() @parametrize - def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: + def test_raw_response_retrieve_agents(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -267,7 +267,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: + def test_streaming_response_retrieve_agents(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -281,7 +281,7 @@ def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve_agents(self, client: GradientAI) -> None: + def test_path_params_retrieve_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.models.providers.openai.with_raw_response.retrieve_agents( uuid="", @@ -295,13 +295,13 @@ class TestAsyncOpenAI: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.create() assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.create( api_key='"sk-proj--123456789098765432123456789"', name='"Production Key"', @@ -310,7 +310,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.create() assert response.is_closed is True @@ -320,7 +320,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -332,7 +332,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve( "api_key_uuid", ) @@ -340,7 +340,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def 
test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve( "api_key_uuid", ) @@ -352,7 +352,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve( "api_key_uuid", ) as response: @@ -366,7 +366,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.models.providers.openai.with_raw_response.retrieve( "", @@ -374,7 +374,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -382,7 +382,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', api_key='"sk-ant-12345678901234567890123456789012"', @@ -393,7 +393,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -405,7 +405,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.update( path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -419,7 +419,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.models.providers.openai.with_raw_response.update( path_api_key_uuid="", @@ -427,13 +427,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> 
None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.list() assert_matches_type(OpenAIListResponse, openai, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.list( page=0, per_page=0, @@ -442,7 +442,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.list() assert response.is_closed is True @@ -452,7 +452,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -464,7 +464,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.delete( "api_key_uuid", ) @@ -472,7 +472,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.delete( "api_key_uuid", ) @@ -484,7 +484,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.delete( "api_key_uuid", ) as response: @@ -498,7 +498,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.models.providers.openai.with_raw_response.delete( "", @@ -506,7 +506,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve_agents(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve_agents( 
uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -514,7 +514,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No @pytest.mark.skip() @parametrize - async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, @@ -524,7 +524,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve_agents(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -536,7 +536,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve_agents( uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -550,7 +550,7 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize - async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve_agents(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.models.providers.openai.with_raw_response.retrieve_agents( uuid="", diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2f68a06f..987f2eda 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradient.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, @@ -26,13 +26,13 @@ class TestAgents: @pytest.mark.skip() @parametrize - def test_method_create(self, client: GradientAI) -> None: + def test_method_create(self, client: Gradient) -> None: agent = client.agents.create() assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: + def test_method_create_with_all_params(self, client: Gradient) -> None: agent = client.agents.create( anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', description='"My Agent Description"', @@ -49,7 +49,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: + def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.with_raw_response.create() assert response.is_closed is True @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: 
GradientAI) -> None: + def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -71,7 +71,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: agent = client.agents.retrieve( "uuid", ) @@ -79,7 +79,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.with_raw_response.retrieve( "uuid", ) @@ -91,7 +91,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.with_streaming_response.retrieve( "uuid", ) as response: @@ -105,7 +105,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: + def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.with_raw_response.retrieve( "", @@ -113,7 +113,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update(self, client: GradientAI) -> None: + def test_method_update(self, client: Gradient) -> None: agent = client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -121,7 +121,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: + def test_method_update_with_all_params(self, client: Gradient) -> None: agent = client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', @@ -145,7 +145,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: + def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.with_raw_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -157,7 +157,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: + def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.with_streaming_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -171,7 +171,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: GradientAI) -> None: + def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): client.agents.with_raw_response.update( path_uuid="", @@ -179,13 +179,13 @@ def test_path_params_update(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: agent = client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: agent = client.agents.list( only_deployed=True, page=0, @@ -195,7 +195,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.with_raw_response.list() assert response.is_closed is True @@ -205,7 +205,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -217,7 +217,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_delete(self, client: GradientAI) -> None: + def test_method_delete(self, client: Gradient) -> None: agent = client.agents.delete( "uuid", ) @@ -225,7 +225,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: + def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.with_raw_response.delete( "uuid", ) @@ -237,7 +237,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: + def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.with_streaming_response.delete( "uuid", ) as response: @@ -251,7 +251,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: + def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.with_raw_response.delete( "", @@ -259,7 +259,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_status(self, client: GradientAI) -> None: + def test_method_update_status(self, client: Gradient) -> None: agent = client.agents.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -267,7 +267,7 @@ def test_method_update_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_update_status_with_all_params(self, client: GradientAI) -> None: + def test_method_update_status_with_all_params(self, client: Gradient) -> None: agent = client.agents.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -277,7 +277,7 @@ def test_method_update_status_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_update_status(self, client: GradientAI) -> None: + def test_raw_response_update_status(self, 
client: Gradient) -> None: response = client.agents.with_raw_response.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -289,7 +289,7 @@ def test_raw_response_update_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update_status(self, client: GradientAI) -> None: + def test_streaming_response_update_status(self, client: Gradient) -> None: with client.agents.with_streaming_response.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -303,7 +303,7 @@ def test_streaming_response_update_status(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update_status(self, client: GradientAI) -> None: + def test_path_params_update_status(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): client.agents.with_raw_response.update_status( path_uuid="", @@ -317,13 +317,13 @@ class TestAsyncAgents: @pytest.mark.skip() @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: + async def test_method_create(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.create() assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.create( anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', description='"My Agent Description"', @@ -340,7 +340,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.create() assert response.is_closed is True @@ -350,7 +350,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -362,7 +362,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.retrieve( "uuid", ) @@ -370,7 +370,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.retrieve( "uuid", ) @@ -382,7 +382,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_retrieve(self, 
async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.retrieve( "uuid", ) as response: @@ -396,7 +396,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.with_raw_response.retrieve( "", @@ -404,7 +404,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: + async def test_method_update(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -412,7 +412,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', @@ -436,7 +436,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -448,7 +448,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -462,7 +462,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): await async_client.agents.with_raw_response.update( path_uuid="", @@ -470,13 +470,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: + async def test_method_list(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.list( only_deployed=True, page=0, @@ -486,7 +486,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize - 
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.list() assert response.is_closed is True @@ -496,7 +496,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + async def test_method_delete(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.delete( "uuid", ) @@ -516,7 +516,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.delete( "uuid", ) @@ -528,7 +528,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.delete( "uuid", ) as response: @@ -542,7 +542,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.with_raw_response.delete( "", @@ -550,7 +550,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize - async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_status(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -558,7 +558,7 @@ async def test_method_update_status(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize - async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: + async def test_method_update_status_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', body_uuid='"12345678-1234-1234-1234-123456789012"', @@ -568,7 +568,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize - async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: + async def test_raw_response_update_status(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.update_status( 
path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -580,7 +580,7 @@ async def test_raw_response_update_status(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize - async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: + async def test_streaming_response_update_status(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.update_status( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: @@ -594,7 +594,7 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien @pytest.mark.skip() @parametrize - async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: + async def test_path_params_update_status(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): await async_client.agents.with_raw_response.update_status( path_uuid="", diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py index cbc7e63b..485fd5f9 100644 --- a/tests/api_resources/test_gpu_droplets.py +++ b/tests/api_resources/test_gpu_droplets.py @@ -7,9 +7,9 @@ import pytest +from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradient.types import ( GPUDropletListResponse, GPUDropletCreateResponse, GPUDropletRetrieveResponse, @@ -27,7 +27,7 @@ class TestGPUDroplets: @pytest.mark.skip() @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: + def test_method_create_overload_1(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( image="ubuntu-20-04-x64", name="example.com", @@ -37,7 +37,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( image="ubuntu-20-04-x64", name="example.com", @@ -63,7 +63,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.create( image="ubuntu-20-04-x64", name="example.com", @@ -77,7 +77,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.create( image="ubuntu-20-04-x64", name="example.com", @@ -93,7 +93,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: + def test_method_create_overload_2(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( image="ubuntu-20-04-x64", names=["sub-01.example.com", "sub-02.example.com"], @@ -103,7 +103,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def 
test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( image="ubuntu-20-04-x64", names=["sub-01.example.com", "sub-02.example.com"], @@ -129,7 +129,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N @pytest.mark.skip() @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.create( image="ubuntu-20-04-x64", names=["sub-01.example.com", "sub-02.example.com"], @@ -143,7 +143,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.create( image="ubuntu-20-04-x64", names=["sub-01.example.com", "sub-02.example.com"], @@ -159,7 +159,7 @@ def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: + def test_method_retrieve(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.retrieve( 1, ) @@ -167,7 +167,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: + def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.retrieve( 1, ) @@ -179,7 +179,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: + def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.retrieve( 1, ) as response: @@ -193,13 +193,13 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: + def test_method_list(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list() assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: + def test_method_list_with_all_params(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list( name="name", page=1, @@ -211,7 +211,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: + def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list() assert response.is_closed is True @@ -221,7 +221,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: + def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -233,7 +233,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: 
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.delete(
             1,
         )
@@ -241,7 +241,7 @@ def test_method_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.delete(
             1,
         )
@@ -253,7 +253,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.delete(
             1,
         ) as response:
@@ -267,7 +267,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_delete_by_tag(self, client: GradientAI) -> None:
+    def test_method_delete_by_tag(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.delete_by_tag(
             tag_name="tag_name",
         )
@@ -275,7 +275,7 @@ def test_method_delete_by_tag(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete_by_tag(self, client: GradientAI) -> None:
+    def test_raw_response_delete_by_tag(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.delete_by_tag(
             tag_name="tag_name",
         )
@@ -287,7 +287,7 @@ def test_raw_response_delete_by_tag(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete_by_tag(self, client: GradientAI) -> None:
+    def test_streaming_response_delete_by_tag(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.delete_by_tag(
             tag_name="tag_name",
         ) as response:
@@ -301,7 +301,7 @@ def test_streaming_response_delete_by_tag(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_firewalls(self, client: GradientAI) -> None:
+    def test_method_list_firewalls(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_firewalls(
             droplet_id=3164444,
         )
@@ -309,7 +309,7 @@ def test_method_list_firewalls(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_firewalls_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_firewalls_with_all_params(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_firewalls(
             droplet_id=3164444,
             page=1,
@@ -319,7 +319,7 @@ def test_method_list_firewalls_with_all_params(self, client: GradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list_firewalls(self, client: GradientAI) -> None:
+    def test_raw_response_list_firewalls(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.list_firewalls(
             droplet_id=3164444,
         )
@@ -331,7 +331,7 @@ def test_raw_response_list_firewalls(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list_firewalls(self, client: GradientAI) -> None:
+    def test_streaming_response_list_firewalls(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.list_firewalls(
             droplet_id=3164444,
         ) as response:
@@ -345,7 +345,7 @@ def test_streaming_response_list_firewalls(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_kernels(self, client: GradientAI) -> None:
+    def test_method_list_kernels(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_kernels(
             droplet_id=3164444,
         )
@@ -353,7 +353,7 @@ def test_method_list_kernels(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_kernels_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_kernels_with_all_params(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_kernels(
             droplet_id=3164444,
             page=1,
@@ -363,7 +363,7 @@ def test_method_list_kernels_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list_kernels(self, client: GradientAI) -> None:
+    def test_raw_response_list_kernels(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.list_kernels(
             droplet_id=3164444,
         )
@@ -375,7 +375,7 @@ def test_raw_response_list_kernels(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list_kernels(self, client: GradientAI) -> None:
+    def test_streaming_response_list_kernels(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.list_kernels(
             droplet_id=3164444,
         ) as response:
@@ -389,7 +389,7 @@ def test_streaming_response_list_kernels(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_neighbors(self, client: GradientAI) -> None:
+    def test_method_list_neighbors(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_neighbors(
             1,
         )
@@ -397,7 +397,7 @@ def test_method_list_neighbors(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list_neighbors(self, client: GradientAI) -> None:
+    def test_raw_response_list_neighbors(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.list_neighbors(
             1,
         )
@@ -409,7 +409,7 @@ def test_raw_response_list_neighbors(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list_neighbors(self, client: GradientAI) -> None:
+    def test_streaming_response_list_neighbors(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.list_neighbors(
             1,
         ) as response:
@@ -423,7 +423,7 @@ def test_streaming_response_list_neighbors(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_snapshots(self, client: GradientAI) -> None:
+    def test_method_list_snapshots(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_snapshots(
             droplet_id=3164444,
         )
@@ -431,7 +431,7 @@ def test_method_list_snapshots(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
    @parametrize
-    def test_method_list_snapshots_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_snapshots_with_all_params(self, client: Gradient) -> None:
         gpu_droplet = client.gpu_droplets.list_snapshots(
             droplet_id=3164444,
             page=1,
@@ -441,7 +441,7 @@ def test_method_list_snapshots_with_all_params(self, client: GradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list_snapshots(self, client: GradientAI) -> None:
+    def test_raw_response_list_snapshots(self, client: Gradient) -> None:
         response = client.gpu_droplets.with_raw_response.list_snapshots(
             droplet_id=3164444,
         )
@@ -453,7 +453,7 @@ def test_raw_response_list_snapshots(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list_snapshots(self, client: GradientAI) -> None:
+    def test_streaming_response_list_snapshots(self, client: Gradient) -> None:
         with client.gpu_droplets.with_streaming_response.list_snapshots(
             droplet_id=3164444,
         ) as response:
@@ -473,7 +473,7 @@ class TestAsyncGPUDroplets:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.create(
             image="ubuntu-20-04-x64",
             name="example.com",
@@ -483,7 +483,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.create(
             image="ubuntu-20-04-x64",
             name="example.com",
@@ -509,7 +509,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.create(
             image="ubuntu-20-04-x64",
             name="example.com",
@@ -523,7 +523,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.create(
             image="ubuntu-20-04-x64",
             name="example.com",
@@ -539,7 +539,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.create(
             image="ubuntu-20-04-x64",
             names=["sub-01.example.com", "sub-02.example.com"],
@@ -549,7 +549,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.create(
             image="ubuntu-20-04-x64",
             names=["sub-01.example.com", "sub-02.example.com"],
@@ -575,7 +575,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.create(
             image="ubuntu-20-04-x64",
             names=["sub-01.example.com", "sub-02.example.com"],
@@ -589,7 +589,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.create(
             image="ubuntu-20-04-x64",
             names=["sub-01.example.com", "sub-02.example.com"],
@@ -605,7 +605,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.retrieve(
             1,
         )
@@ -613,7 +613,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.retrieve(
             1,
         )
@@ -625,7 +625,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.retrieve(
             1,
         ) as response:
@@ -639,13 +639,13 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list()
         assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list(
             name="name",
             page=1,
@@ -657,7 +657,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.list()
 
         assert response.is_closed is True
@@ -667,7 +667,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -679,7 +679,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.delete(
             1,
         )
@@ -687,7 +687,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.delete(
             1,
         )
@@ -699,7 +699,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.delete(
             1,
         ) as response:
@@ -713,7 +713,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete_by_tag(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete_by_tag(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.delete_by_tag(
             tag_name="tag_name",
         )
@@ -721,7 +721,7 @@ async def test_method_delete_by_tag(self, async_client: AsyncGradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete_by_tag(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.delete_by_tag(
             tag_name="tag_name",
         )
@@ -733,7 +733,7 @@ async def test_raw_response_delete_by_tag(self, async_client: AsyncGradientAI) -
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.delete_by_tag(
             tag_name="tag_name",
         ) as response:
@@ -747,7 +747,7 @@ async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradien
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_firewalls(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_firewalls(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_firewalls(
             droplet_id=3164444,
         )
@@ -755,7 +755,7 @@ async def test_method_list_firewalls(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_firewalls(
             droplet_id=3164444,
             page=1,
@@ -765,7 +765,7 @@ async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGr
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_firewalls(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_firewalls(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.list_firewalls(
             droplet_id=3164444,
         )
@@ -777,7 +777,7 @@ async def test_raw_response_list_firewalls(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_firewalls(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_firewalls(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.list_firewalls(
             droplet_id=3164444,
         ) as response:
@@ -791,7 +791,7 @@ async def test_streaming_response_list_firewalls(self, async_client: AsyncGradie
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_kernels(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_kernels(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_kernels(
             droplet_id=3164444,
         )
@@ -799,7 +799,7 @@ async def test_method_list_kernels(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_kernels(
             droplet_id=3164444,
             page=1,
@@ -809,7 +809,7 @@ async def test_method_list_kernels_with_all_params(self, async_client: AsyncGrad
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_kernels(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_kernels(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.list_kernels(
             droplet_id=3164444,
         )
@@ -821,7 +821,7 @@ async def test_raw_response_list_kernels(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_kernels(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_kernels(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.list_kernels(
             droplet_id=3164444,
         ) as response:
@@ -835,7 +835,7 @@ async def test_streaming_response_list_kernels(self, async_client: AsyncGradient
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_neighbors(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_neighbors(
             1,
         )
@@ -843,7 +843,7 @@ async def test_method_list_neighbors(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_neighbors(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.list_neighbors(
             1,
         )
@@ -855,7 +855,7 @@ async def test_raw_response_list_neighbors(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_neighbors(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_neighbors(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.list_neighbors(
             1,
         ) as response:
@@ -869,7 +869,7 @@ async def test_streaming_response_list_neighbors(self, async_client: AsyncGradie
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_snapshots(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_snapshots(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_snapshots(
             droplet_id=3164444,
         )
@@ -877,7 +877,7 @@ async def test_method_list_snapshots(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradient) -> None:
         gpu_droplet = await async_client.gpu_droplets.list_snapshots(
             droplet_id=3164444,
             page=1,
@@ -887,7 +887,7 @@ async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGr
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list_snapshots(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list_snapshots(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.with_raw_response.list_snapshots(
             droplet_id=3164444,
         )
@@ -899,7 +899,7 @@ async def test_raw_response_list_snapshots(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list_snapshots(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list_snapshots(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.with_streaming_response.list_snapshots(
             droplet_id=3164444,
         ) as response:
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index c4d179cc..1628fdbe 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import (
+from gradient.types import (
     KnowledgeBaseListResponse,
     KnowledgeBaseCreateResponse,
     KnowledgeBaseDeleteResponse,
@@ -25,13 +25,13 @@ class TestKnowledgeBases:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create(self, client: GradientAI) -> None:
+    def test_method_create(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.create()
         assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_create_with_all_params(self, client: GradientAI) -> None:
+    def test_method_create_with_all_params(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.create(
             database_id='"12345678-1234-1234-1234-123456789012"',
             datasources=[
@@ -74,7 +74,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_create(self, client: GradientAI) -> None:
+    def test_raw_response_create(self, client: Gradient) -> None:
         response = client.knowledge_bases.with_raw_response.create()
 
         assert response.is_closed is True
@@ -84,7 +84,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_create(self, client: GradientAI) -> None:
+    def test_streaming_response_create(self, client: Gradient) -> None:
         with client.knowledge_bases.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -96,7 +96,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_retrieve(self, client: GradientAI) -> None:
+    def test_method_retrieve(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.retrieve(
             "uuid",
         )
@@ -104,7 +104,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_retrieve(self, client: GradientAI) -> None:
+    def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.knowledge_bases.with_raw_response.retrieve(
             "uuid",
         )
@@ -116,7 +116,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+    def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.knowledge_bases.with_streaming_response.retrieve(
             "uuid",
         ) as response:
@@ -130,7 +130,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_retrieve(self, client: GradientAI) -> None:
+    def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             client.knowledge_bases.with_raw_response.retrieve(
                 "",
@@ -138,7 +138,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_update(self, client: GradientAI) -> None:
+    def test_method_update(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -146,7 +146,7 @@ def test_method_update(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_update_with_all_params(self, client: GradientAI) -> None:
+    def test_method_update_with_all_params(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             database_id='"12345678-1234-1234-1234-123456789012"',
@@ -160,7 +160,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_update(self, client: GradientAI) -> None:
+    def test_raw_response_update(self, client: Gradient) -> None:
         response = client.knowledge_bases.with_raw_response.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -172,7 +172,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_update(self, client: GradientAI) -> None:
+    def test_streaming_response_update(self, client: Gradient) -> None:
         with client.knowledge_bases.with_streaming_response.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -186,7 +186,7 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_update(self, client: GradientAI) -> None:
+    def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
             client.knowledge_bases.with_raw_response.update(
                 path_uuid="",
@@ -194,13 +194,13 @@ def test_path_params_update(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.list()
         assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.list(
             page=0,
             per_page=0,
@@ -209,7 +209,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.knowledge_bases.with_raw_response.list()
 
         assert response.is_closed is True
@@ -219,7 +219,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.knowledge_bases.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -231,7 +231,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_delete(self, client: GradientAI) -> None:
+    def test_method_delete(self, client: Gradient) -> None:
         knowledge_base = client.knowledge_bases.delete(
             "uuid",
         )
@@ -239,7 +239,7 @@ def test_method_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_delete(self, client: GradientAI) -> None:
+    def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.knowledge_bases.with_raw_response.delete(
             "uuid",
         )
@@ -251,7 +251,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_delete(self, client: GradientAI) -> None:
+    def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.knowledge_bases.with_streaming_response.delete(
             "uuid",
         ) as response:
@@ -265,7 +265,7 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_path_params_delete(self, client: GradientAI) -> None:
+    def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             client.knowledge_bases.with_raw_response.delete(
                 "",
@@ -279,13 +279,13 @@ class TestAsyncKnowledgeBases:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.create()
         assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.create(
             database_id='"12345678-1234-1234-1234-123456789012"',
             datasources=[
@@ -328,7 +328,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.with_raw_response.create()
 
         assert response.is_closed is True
@@ -338,7 +338,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.with_streaming_response.create() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -350,7 +350,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.retrieve(
             "uuid",
         )
@@ -358,7 +358,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.with_raw_response.retrieve(
             "uuid",
         )
@@ -370,7 +370,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.with_streaming_response.retrieve(
             "uuid",
         ) as response:
@@ -384,7 +384,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             await async_client.knowledge_bases.with_raw_response.retrieve(
                 "",
@@ -392,7 +392,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -400,7 +400,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             database_id='"12345678-1234-1234-1234-123456789012"',
@@ -414,7 +414,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.with_raw_response.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         )
@@ -426,7 +426,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.with_streaming_response.update(
             path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
@@ -440,7 +440,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
             await async_client.knowledge_bases.with_raw_response.update(
                 path_uuid="",
@@ -448,13 +448,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.list()
         assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.list(
             page=0,
             per_page=0,
@@ -463,7 +463,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.with_raw_response.list()
 
         assert response.is_closed is True
@@ -473,7 +473,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -485,7 +485,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.knowledge_bases.delete(
             "uuid",
         )
@@ -493,7 +493,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.with_raw_response.delete(
             "uuid",
         )
@@ -505,7 +505,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.with_streaming_response.delete(
             "uuid",
         ) as response:
@@ -519,7 +519,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
 
     @pytest.mark.skip()
     @parametrize
-    async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
             await async_client.knowledge_bases.with_raw_response.delete(
                 "",
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index f7e21015..7b2a5a4a 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import ModelListResponse
+from gradient.types import ModelListResponse
 
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,13 +19,13 @@ class TestModels:
 
     @pytest.mark.skip()
    @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         model = client.models.list()
         assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         model = client.models.list(
             page=0,
             per_page=0,
@@ -36,7 +36,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.models.with_raw_response.list()
 
         assert response.is_closed is True
@@ -46,7 +46,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.models.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -64,13 +64,13 @@ class TestAsyncModels:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         model = await async_client.models.list()
         assert_matches_type(ModelListResponse, model, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         model = await async_client.models.list(
             page=0,
             per_page=0,
@@ -81,7 +81,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.models.with_raw_response.list()
 
         assert response.is_closed is True
@@ -91,7 +91,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.models.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
index f331342e..5bf67e91 100644
--- a/tests/api_resources/test_regions.py
+++ b/tests/api_resources/test_regions.py
@@ -7,9 +7,9 @@
 
 import pytest
 
+from gradient import Gradient, AsyncGradient
 from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import RegionListResponse
+from gradient.types import RegionListResponse
 
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,13 +19,13 @@ class TestRegions:
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list(self, client: GradientAI) -> None:
+    def test_method_list(self, client: Gradient) -> None:
         region = client.regions.list()
         assert_matches_type(RegionListResponse, region, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    def test_method_list_with_all_params(self, client: GradientAI) -> None:
+    def test_method_list_with_all_params(self, client: Gradient) -> None:
         region = client.regions.list(
             page=1,
             per_page=1,
@@ -34,7 +34,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_raw_response_list(self, client: GradientAI) -> None:
+    def test_raw_response_list(self, client: Gradient) -> None:
         response = client.regions.with_raw_response.list()
 
         assert response.is_closed is True
@@ -44,7 +44,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    def test_streaming_response_list(self, client: GradientAI) -> None:
+    def test_streaming_response_list(self, client: Gradient) -> None:
         with client.regions.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -62,13 +62,13 @@ class TestAsyncRegions:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
         region = await async_client.regions.list()
         assert_matches_type(RegionListResponse, region, path=["response"])
 
     @pytest.mark.skip()
     @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         region = await async_client.regions.list(
             page=1,
             per_page=1,
@@ -77,7 +77,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
 
     @pytest.mark.skip()
     @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.regions.with_raw_response.list()
 
         assert response.is_closed is True
@@ -87,7 +87,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
 
     @pytest.mark.skip()
     @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.regions.with_streaming_response.list() as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/conftest.py b/tests/conftest.py
index 1e102b94..87962cc1 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,15 +10,15 @@
 import pytest
 from pytest_asyncio import is_async_test
 
-from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
-from do_gradientai._utils import is_dict
+from gradient import Gradient, AsyncGradient, DefaultAioHttpClient
+from gradient._utils import is_dict
 
 if TYPE_CHECKING:
     from _pytest.fixtures import FixtureRequest  # pyright: ignore[reportPrivateImportUsage]
 
 pytest.register_assert_rewrite("tests.utils")
 
-logging.getLogger("do_gradientai").setLevel(logging.DEBUG)
+logging.getLogger("gradient").setLevel(logging.DEBUG)
 
 
 # automatically add `pytest.mark.asyncio()` to all of our async tests
@@ -51,12 +51,12 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
 
 
 @pytest.fixture(scope="session")
-def client(request: FixtureRequest) -> Iterator[GradientAI]:
+def client(request: FixtureRequest) -> Iterator[Gradient]:
     strict = getattr(request, "param", True)
     if not isinstance(strict, bool):
         raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
 
-    with GradientAI(
+    with Gradient(
        base_url=base_url,
         api_key=api_key,
         inference_key=inference_key,
@@ -67,7 +67,7 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]:
 
 
 @pytest.fixture(scope="session")
-async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI]:
+async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradient]:
     param = getattr(request, "param", True)
 
     # defaults
@@ -86,7 +86,7 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI
     else:
         raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
 
-    async with AsyncGradientAI(
+    async with AsyncGradient(
         base_url=base_url,
         api_key=api_key,
         inference_key=inference_key,
diff --git a/tests/test_client.py b/tests/test_client.py
index 85b7d31a..e85df56b 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -21,12 +21,12 @@
 from respx import MockRouter
 from pydantic import ValidationError
 
-from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
-from do_gradientai._types import Omit
-from do_gradientai._models import BaseModel, FinalRequestOptions
-from do_gradientai._streaming import Stream, AsyncStream
-from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
-from do_gradientai._base_client import (
+from gradient import Gradient, AsyncGradient, APIResponseValidationError
+from gradient._types import Omit
+from gradient._models import BaseModel, FinalRequestOptions
+from gradient._streaming import Stream, AsyncStream
+from gradient._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
+from gradient._base_client import (
     DEFAULT_TIMEOUT,
     HTTPX_DEFAULT_TIMEOUT,
     BaseClient,
@@ -53,7 +53,7 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
     return 0.1
 
 
-def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int:
+def _get_open_connections(client: Gradient | AsyncGradient) -> int:
     transport = client._client._transport
     assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
 
@@ -61,8 +61,8 @@ def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int:
     return len(pool._requests)
 
 
-class TestGradientAI:
-    client = GradientAI(
+class TestGradient:
+    client = Gradient(
         base_url=base_url,
         api_key=api_key,
         inference_key=inference_key,
@@ -123,7 +123,7 @@ def test_copy_default_options(self) -> None:
         assert isinstance(self.client.timeout, httpx.Timeout)
 
     def test_copy_default_headers(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -162,7 +162,7 @@ def test_copy_default_headers(self) -> None:
             client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
 
     def test_copy_default_query(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
             # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
             #
             # removing the decorator fixes the leak for reasons we don't understand.
-            "do_gradientai/_legacy_response.py",
-            "do_gradientai/_response.py",
+            "gradient/_legacy_response.py",
+            "gradient/_response.py",
             # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
-            "do_gradientai/_compat.py",
+            "gradient/_compat.py",
             # Standard library leaks we don't care about.
             "/logging/__init__.py",
         ]
@@ -293,7 +293,7 @@ def test_request_timeout(self) -> None:
         assert timeout == httpx.Timeout(100.0)
 
     def test_client_timeout_option(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -309,7 +309,7 @@ def test_client_timeout_option(self) -> None:
     def test_http_client_timeout_option(self) -> None:
         # custom timeout given to the httpx client should be used
         with httpx.Client(timeout=None) as http_client:
-            client = GradientAI(
+            client = Gradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -324,7 +324,7 @@ def test_http_client_timeout_option(self) -> None:
 
         # no timeout given to the httpx client should not use the httpx default
         with httpx.Client() as http_client:
-            client = GradientAI(
+            client = Gradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -339,7 +339,7 @@ def test_http_client_timeout_option(self) -> None:
 
         # explicitly passing the default timeout currently results in it being ignored
         with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
-            client = GradientAI(
+            client = Gradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -355,7 +355,7 @@ def test_http_client_timeout_option(self) -> None:
     async def test_invalid_http_client(self) -> None:
         with pytest.raises(TypeError, match="Invalid `http_client` arg"):
             async with httpx.AsyncClient() as http_client:
-                GradientAI(
+                Gradient(
                     base_url=base_url,
                     api_key=api_key,
                     inference_key=inference_key,
@@ -365,7 +365,7 @@ async def test_invalid_http_client(self) -> None:
             )
 
     def test_default_headers_option(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -377,7 +377,7 @@ def test_default_headers_option(self) -> None:
         assert request.headers.get("x-foo") == "bar"
         assert request.headers.get("x-stainless-lang") == "python"
 
-        client2 = GradientAI(
+        client2 = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -393,7 +393,7 @@ def test_default_headers_option(self) -> None:
         assert request.headers.get("x-stainless-lang") == "my-overriding-header"
 
     def test_validate_headers(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -404,7 +404,7 @@ def test_validate_headers(self) -> None:
         assert request.headers.get("Authorization") == f"Bearer {api_key}"
 
         with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
-            client2 = GradientAI(
+            client2 = Gradient(
                 base_url=base_url,
                 api_key=None,
                 inference_key=inference_key,
@@ -424,7 +424,7 @@ def test_validate_headers(self) -> None:
         assert request2.headers.get("Authorization") is None
 
     def test_default_query_option(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -543,7 +543,7 @@ def test_request_extra_query(self) -> None:
         params = dict(request.url.params)
         assert params == {"foo": "2"}
 
-    def test_multipart_repeating_array(self, client: GradientAI) -> None:
+    def test_multipart_repeating_array(self, client: Gradient) -> None:
         request = client._build_request(
             FinalRequestOptions.construct(
                 method="post",
@@ -630,7 +630,7 @@ class Model(BaseModel):
         assert response.foo == 2
 
     def test_base_url_setter(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url="https://example.com/from_init",
             api_key=api_key,
             inference_key=inference_key,
@@ -644,8 +644,8 @@ def test_base_url_setter(self) -> None:
         assert client.base_url == "https://example.com/from_setter/"
 
     def test_base_url_env(self) -> None:
-        with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
-            client = GradientAI(
+        with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
+            client = Gradient(
                 api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True
             )
             assert client.base_url == "http://localhost:5000/from/env/"
@@ -653,14 +653,14 @@ def test_base_url_env(self) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -671,7 +671,7 @@ def test_base_url_env(self) -> None:
         ],
         ids=["standard", "custom http client"],
     )
-    def test_base_url_trailing_slash(self, client: GradientAI) -> None:
+    def test_base_url_trailing_slash(self, client: Gradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -684,14 +684,14 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -702,7 +702,7 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None:
         ],
         ids=["standard", "custom http client"],
     )
-    def test_base_url_no_trailing_slash(self, client: GradientAI) -> None:
+    def test_base_url_no_trailing_slash(self, client: Gradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -715,14 +715,14 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            GradientAI(
+            Gradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -733,7 +733,7 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None:
         ],
         ids=["standard", "custom http client"],
    )
-    def test_absolute_request_url(self, client: GradientAI) -> None:
+    def test_absolute_request_url(self, client: Gradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -744,7 +744,7 @@ def test_absolute_request_url(self, client: GradientAI) -> None:
         assert request.url == "https://myapi.com/foo"
 
     def test_copied_client_does_not_close_http(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -761,7 +761,7 @@ def test_copied_client_does_not_close_http(self) -> None:
         assert not client.is_closed()
 
     def test_client_context_manager(self) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -788,7 +788,7 @@ class Model(BaseModel):
 
     def test_client_max_retries_validation(self) -> None:
         with pytest.raises(TypeError, match=r"max_retries cannot be None"):
-            GradientAI(
+            Gradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -815,7 +815,7 @@ class Model(BaseModel):
 
         respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
 
-        strict_client = GradientAI(
+        strict_client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -826,7 +826,7 @@ class Model(BaseModel):
         with pytest.raises(APIResponseValidationError):
             strict_client.get("/foo", cast_to=Model)
 
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -860,7 +860,7 @@ class Model(BaseModel):
     )
     @mock.patch("time.time", mock.MagicMock(return_value=1696004797))
     def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
-        client = GradientAI(
+        client = Gradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -873,9 +873,9 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
         calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
-    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
+    def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
         respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
 
         with pytest.raises(APITimeoutError):
@@ -891,9 +891,9 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
 
         assert _get_open_connections(self.client) == 0
 
-    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
+    def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
         respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
 
         with pytest.raises(APIStatusError):
@@ -909,12 +909,12 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
 
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     @pytest.mark.parametrize("failure_mode", ["status", "exception"])
     def test_retries_taken(
         self,
-        client: GradientAI,
+        client: Gradient,
         failures_before_success: int,
         failure_mode: Literal["status", "exception"],
         respx_mock: MockRouter,
@@ -948,10 +948,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_omit_retry_count_header(
-        self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
+        self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
     ) -> None:
         client = client.with_options(max_retries=4)
 
@@ -980,10 +980,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
-    @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+    @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_overwrite_retry_count_header(
-        self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
+        self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
     ) -> None:
         client = client.with_options(max_retries=4)
 
@@ -1061,8 +1061,8 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
         assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
 
 
-class TestAsyncGradientAI:
-    client = AsyncGradientAI(
+class TestAsyncGradient:
+    client = AsyncGradient(
         base_url=base_url,
         api_key=api_key,
         inference_key=inference_key,
@@ -1125,7 +1125,7 @@ def test_copy_default_options(self) -> None:
         assert isinstance(self.client.timeout, httpx.Timeout)
 
     def test_copy_default_headers(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1164,7 +1164,7 @@ def test_copy_default_headers(self) -> None:
             client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
 
     def test_copy_default_query(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
             # to_raw_response_wrapper leaks through the @functools.wraps() decorator.
             #
             # removing the decorator fixes the leak for reasons we don't understand.
-            "do_gradientai/_legacy_response.py",
-            "do_gradientai/_response.py",
+            "gradient/_legacy_response.py",
+            "gradient/_response.py",
             # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
-            "do_gradientai/_compat.py",
+            "gradient/_compat.py",
             # Standard library leaks we don't care about.
             "/logging/__init__.py",
         ]
@@ -1295,7 +1295,7 @@ async def test_request_timeout(self) -> None:
         assert timeout == httpx.Timeout(100.0)
 
     async def test_client_timeout_option(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1311,7 +1311,7 @@ async def test_client_timeout_option(self) -> None:
     async def test_http_client_timeout_option(self) -> None:
         # custom timeout given to the httpx client should be used
         async with httpx.AsyncClient(timeout=None) as http_client:
-            client = AsyncGradientAI(
+            client = AsyncGradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1326,7 +1326,7 @@ async def test_http_client_timeout_option(self) -> None:
 
         # no timeout given to the httpx client should not use the httpx default
         async with httpx.AsyncClient() as http_client:
-            client = AsyncGradientAI(
+            client = AsyncGradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1341,7 +1341,7 @@ async def test_http_client_timeout_option(self) -> None:
 
         # explicitly passing the default timeout currently results in it being ignored
         async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
-            client = AsyncGradientAI(
+            client = AsyncGradient(
                 base_url=base_url,
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1357,7 +1357,7 @@ async def test_http_client_timeout_option(self) -> None:
     def test_invalid_http_client(self) -> None:
         with pytest.raises(TypeError, match="Invalid `http_client` arg"):
             with httpx.Client() as http_client:
-                AsyncGradientAI(
+                AsyncGradient(
                     base_url=base_url,
                     api_key=api_key,
                     inference_key=inference_key,
@@ -1367,7 +1367,7 @@ def test_invalid_http_client(self) -> None:
             )
 
     def test_default_headers_option(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1379,7 +1379,7 @@ def test_default_headers_option(self) -> None:
         assert request.headers.get("x-foo") == "bar"
         assert request.headers.get("x-stainless-lang") == "python"
 
-        client2 = AsyncGradientAI(
+        client2 = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1395,7 +1395,7 @@ def test_default_headers_option(self) -> None:
         assert request.headers.get("x-stainless-lang") == "my-overriding-header"
 
     def test_validate_headers(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1406,7 +1406,7 @@ def test_validate_headers(self) -> None:
         assert request.headers.get("Authorization") == f"Bearer {api_key}"
 
         with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
-            client2 = AsyncGradientAI(
+            client2 = AsyncGradient(
                 base_url=base_url,
                 api_key=None,
                 inference_key=inference_key,
@@ -1426,7 +1426,7 @@ def test_validate_headers(self) -> None:
         assert request2.headers.get("Authorization") is None
 
     def test_default_query_option(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1545,7 +1545,7 @@ def test_request_extra_query(self) -> None:
         params = dict(request.url.params)
         assert params == {"foo": "2"}
 
-    def test_multipart_repeating_array(self, async_client: AsyncGradientAI) -> None:
+    def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None:
         request = async_client._build_request(
             FinalRequestOptions.construct(
                 method="post",
@@ -1632,7 +1632,7 @@ class Model(BaseModel):
         assert response.foo == 2
 
     def test_base_url_setter(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url="https://example.com/from_init",
             api_key=api_key,
             inference_key=inference_key,
@@ -1646,8 +1646,8 @@ def test_base_url_setter(self) -> None:
         assert client.base_url == "https://example.com/from_setter/"
 
     def test_base_url_env(self) -> None:
-        with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
-            client = AsyncGradientAI(
+        with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
+            client = AsyncGradient(
                 api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True
             )
             assert client.base_url == "http://localhost:5000/from/env/"
@@ -1655,14 +1655,14 @@ def test_base_url_env(self) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1673,7 +1673,7 @@ def test_base_url_env(self) -> None:
         ],
         ids=["standard", "custom http client"],
    )
-    def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None:
+    def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -1686,14 +1686,14 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1704,7 +1704,7 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None:
         ],
         ids=["standard", "custom http client"],
    )
-    def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None:
+    def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -1717,14 +1717,14 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None:
     @pytest.mark.parametrize(
         "client",
         [
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
                 agent_key=agent_key,
                 _strict_response_validation=True,
             ),
-            AsyncGradientAI(
+            AsyncGradient(
                 base_url="http://localhost:5000/custom/path/",
                 api_key=api_key,
                 inference_key=inference_key,
@@ -1735,7 +1735,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None:
         ],
         ids=["standard", "custom http client"],
    )
-    def test_absolute_request_url(self, client: AsyncGradientAI) -> None:
+    def test_absolute_request_url(self, client: AsyncGradient) -> None:
         request = client._build_request(
             FinalRequestOptions(
                 method="post",
@@ -1746,7 +1746,7 @@ def test_absolute_request_url(self, client: AsyncGradientAI) -> None:
         assert request.url == "https://myapi.com/foo"
 
     async def test_copied_client_does_not_close_http(self) -> None:
-        client = AsyncGradientAI(
+        client = AsyncGradient(
             base_url=base_url,
             api_key=api_key,
             inference_key=inference_key,
@@ -1764,7 +1764,7 @@ async def test_copied_client_does_not_close_http(self) -> None:
         assert not client.is_closed()
 
     async def test_client_context_manager(self) -> None:
-
client = AsyncGradientAI( + client = AsyncGradient( base_url=base_url, api_key=api_key, inference_key=inference_key, @@ -1792,7 +1792,7 @@ class Model(BaseModel): async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): - AsyncGradientAI( + AsyncGradient( base_url=base_url, api_key=api_key, inference_key=inference_key, @@ -1821,7 +1821,7 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = AsyncGradientAI( + strict_client = AsyncGradient( base_url=base_url, api_key=api_key, inference_key=inference_key, @@ -1832,7 +1832,7 @@ class Model(BaseModel): with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncGradientAI( + client = AsyncGradient( base_url=base_url, api_key=api_key, inference_key=inference_key, @@ -1867,7 +1867,7 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncGradientAI( + client = AsyncGradient( base_url=base_url, api_key=api_key, inference_key=inference_key, @@ -1880,10 +1880,10 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( - self, respx_mock: MockRouter, async_client: AsyncGradientAI + self, respx_mock: MockRouter, async_client: AsyncGradient ) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -1900,10 +1900,10 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( - self, respx_mock: MockRouter, async_client: AsyncGradientAI + self, respx_mock: MockRouter, async_client: AsyncGradient ) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) @@ -1920,13 +1920,13 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) async def test_retries_taken( self, - async_client: AsyncGradientAI, + async_client: AsyncGradient, failures_before_success: int, failure_mode: Literal["status", "exception"], respx_mock: MockRouter, @@ -1960,11 +1960,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert 
int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( - self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -1993,11 +1993,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( - self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from do_gradientai._utils import asyncify - from do_gradientai._base_client import get_platform + from gradient._utils import asyncify + from gradient._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 5a98ce1b..b5520a27 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from do_gradientai._utils import deepcopy_minimal +from gradient._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index 341e65ae..9514d242 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from do_gradientai._types import FileTypes -from do_gradientai._utils import extract_files +from gradient._types import FileTypes +from gradient._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index ff7914bb..4d9f4066 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from do_gradientai._files import to_httpx_files, async_to_httpx_files +from gradient._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 9a3891e3..9a2ee908 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from do_gradientai._utils import PropertyInfo -from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from do_gradientai._models import BaseModel, construct_type +from gradient._utils import PropertyInfo +from gradient._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradient._models import BaseModel, 
construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index c9213571..32fb2091 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from do_gradientai._qs import Querystring, stringify +from gradient._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 434e9491..3956dc02 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from do_gradientai._utils import required_args +from gradient._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 001ce776..6dd53185 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from do_gradientai import BaseModel, GradientAI, AsyncGradientAI -from do_gradientai._response import ( +from gradient import Gradient, BaseModel, AsyncGradient +from gradient._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from do_gradientai._streaming import Stream -from do_gradientai._base_client import FinalRequestOptions +from gradient._streaming import Stream +from gradient._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -56,7 +56,7 @@ def test_extract_response_type_binary_response() -> None: class PydanticModel(pydantic.BaseModel): ... -def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: +def test_response_parse_mismatched_basemodel(client: Gradient) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -68,13 +68,13 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradient import BaseModel`", ): response.parse(to=PydanticModel) @pytest.mark.asyncio -async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGradientAI) -> None: +async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGradient) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -86,12 +86,12 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from gradient import BaseModel`", ): await response.parse(to=PydanticModel) -def test_response_parse_custom_stream(client: GradientAI) -> None: +def test_response_parse_custom_stream(client: Gradient) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -106,7 +106,7 @@ def test_response_parse_custom_stream(client: GradientAI) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_stream(async_client: AsyncGradientAI) -> None: +async def test_async_response_parse_custom_stream(async_client: AsyncGradient) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -125,7 +125,7 @@ class CustomModel(BaseModel): bar: int -def test_response_parse_custom_model(client: GradientAI) -> None: +def test_response_parse_custom_model(client: Gradient) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -141,7 +141,7 @@ def test_response_parse_custom_model(client: GradientAI) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_model(async_client: AsyncGradientAI) -> None: +async def test_async_response_parse_custom_model(async_client: AsyncGradient) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -156,7 +156,7 @@ async def test_async_response_parse_custom_model(async_client: AsyncGradientAI) assert obj.bar == 2 -def test_response_parse_annotated_type(client: GradientAI) -> None: +def test_response_parse_annotated_type(client: Gradient) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -173,7 +173,7 @@ def test_response_parse_annotated_type(client: GradientAI) -> None: assert obj.bar == 2 -async def test_async_response_parse_annotated_type(async_client: AsyncGradientAI) -> None: +async def test_async_response_parse_annotated_type(async_client: AsyncGradient) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -201,7 +201,7 @@ async def test_async_response_parse_annotated_type(async_client: AsyncGradientAI ("FalSe", False), ], ) -def test_response_parse_bool(client: GradientAI, content: str, expected: bool) -> None: +def test_response_parse_bool(client: Gradient, content: str, expected: bool) -> None: response = APIResponse( raw=httpx.Response(200, content=content), client=client, @@ -226,7 +226,7 @@ def test_response_parse_bool(client: GradientAI, content: str, expected: bool) - ("FalSe", False), ], ) -async def test_async_response_parse_bool(client: AsyncGradientAI, content: str, expected: bool) -> None: +async def test_async_response_parse_bool(client: AsyncGradient, content: str, expected: bool) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=content), client=client, @@ -245,7 +245,7 @@ class OtherModel(BaseModel): @pytest.mark.parametrize("client", [False], indirect=True) # loose validation -def test_response_parse_expect_model_union_non_json_content(client: GradientAI) -> None: +def test_response_parse_expect_model_union_non_json_content(client: Gradient) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=client, @@ -262,7 +262,7 @@ def test_response_parse_expect_model_union_non_json_content(client: GradientAI) @pytest.mark.asyncio 
@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation -async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncGradientAI) -> None: +async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncGradient) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=async_client, diff --git a/tests/test_streaming.py b/tests/test_streaming.py index c1ce8e85..c4a8e46f 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,13 +5,13 @@ import httpx import pytest -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from gradient import Gradient, AsyncGradient +from gradient._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_basic(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_basic(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: completion\n" yield b'data: {"foo":true}\n' @@ -28,7 +28,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_data_missing_event(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_data_missing_event(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b'data: {"foo":true}\n' yield b"\n" @@ -44,7 +44,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_event_missing_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_event_missing_data(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -60,7 +60,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_multiple_events(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -82,7 +82,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events_with_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_multiple_events_with_data(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo":true}\n' @@ -106,9 +106,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_data_lines_with_empty_line( - sync: bool, client: GradientAI, async_client: AsyncGradientAI -) -> None: +async def test_multiple_data_lines_with_empty_line(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"data: {\n" @@ -130,7 +128,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], 
ids=["sync", "async"]) -async def test_data_json_escaped_double_new_line(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_data_json_escaped_double_new_line(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo": "my long\\n\\ncontent"}' @@ -147,7 +145,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_data_lines(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: +async def test_multiple_data_lines(sync: bool, client: Gradient, async_client: AsyncGradient) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"data: {\n" @@ -167,8 +165,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_special_new_line_character( sync: bool, - client: GradientAI, - async_client: AsyncGradientAI, + client: Gradient, + async_client: AsyncGradient, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":" culpa"}\n' @@ -198,8 +196,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_multi_byte_character_multiple_chunks( sync: bool, - client: GradientAI, - async_client: AsyncGradientAI, + client: Gradient, + async_client: AsyncGradient, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":"' @@ -239,8 +237,8 @@ def make_event_iterator( content: Iterator[bytes], *, sync: bool, - client: GradientAI, - async_client: AsyncGradientAI, + client: Gradient, + async_client: AsyncGradient, ) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]: if sync: return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events() diff --git a/tests/test_transform.py b/tests/test_transform.py index 30c06d6a..552462fa 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from do_gradientai._types import NOT_GIVEN, Base64FileInput -from do_gradientai._utils import ( +from gradient._types import NOT_GIVEN, Base64FileInput +from gradient._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from do_gradientai._compat import PYDANTIC_V2 -from do_gradientai._models import BaseModel +from gradient._compat import PYDANTIC_V2 +from gradient._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 9ce2e0d3..af6d092a 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from do_gradientai._utils import LazyProxy +from gradient._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index c9129fdc..5f9711a2 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from do_gradientai._utils import extract_type_var_from_base +from gradient._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index 9def7c60..e150f00b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, 
get_args, get_origin, assert_type -from do_gradientai._types import Omit, NoneType -from do_gradientai._utils import ( +from gradient._types import Omit, NoneType +from gradient._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from do_gradientai._models import BaseModel +from gradient._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradient._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From 1b0c615f38b684f8d5450d5dfcc6c8c2b7b6d498 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 22:53:53 +0000 Subject: [PATCH 141/200] feat(api): remove GRADIENTAI env vars --- .github/workflows/ci.yml | 6 +++--- .stats.yml | 4 ++-- README.md | 6 +++--- scripts/utils/upload-artifact.sh | 2 +- src/gradient/_client.py | 24 ++++++++++++------------ tests/test_client.py | 4 ++-- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3acbc370..05a89405 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -36,7 +36,7 @@ jobs: run: ./scripts/lint build: - if: github.repository == 'stainless-sdks/gradientai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) + if: github.repository == 'stainless-sdks/gradient-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) timeout-minutes: 10 name: build permissions: @@ -76,7 +76,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 diff --git a/.stats.yml b/.stats.yml index 8250cefa..8c069c9b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859 -config_hash: b7f3d0224b636e5f618a254fa3a6499a +config_hash: 77ddef130940a6ad8ea6c6f66aee8757 diff --git a/README.md b/README.md index f141a2ea..60a95711 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ import os from gradient import Gradient client = Gradient( - api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted ) completion = client.chat.completions.create( @@ -46,7 +46,7 @@ print(completion.choices) While you can provide an `api_key` keyword 
argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `GRADIENTAI_API_KEY="My API Key"` to your `.env` file +to add `GRADIENT_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. ## Async usage @@ -59,7 +59,7 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted ) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 170e8cfe..d93584b2 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -20,7 +20,7 @@ UPLOAD_RESPONSE=$(curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/gradientai-python/$SHA/$FILENAME'\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/gradient-python/$SHA/$FILENAME'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 03f8ddcd..dc808d5b 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -90,20 +90,20 @@ def __init__( """Construct a new synchronous Gradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENTAI_API_KEY` - - `inference_key` from `GRADIENTAI_INFERENCE_KEY` - - `agent_key` from `GRADIENTAI_AGENT_KEY` + - `api_key` from `GRADIENT_API_KEY` + - `inference_key` from `GRADIENT_INFERENCE_KEY` + - `agent_key` from `GRADIENT_AGENT_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENT_API_KEY") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENTAI_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_KEY") self.agent_key = agent_key self.agent_domain = agent_domain @@ -345,20 +345,20 @@ def __init__( """Construct a new async AsyncGradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENTAI_API_KEY` - - `inference_key` from `GRADIENTAI_INFERENCE_KEY` - - `agent_key` from `GRADIENTAI_AGENT_KEY` + - `api_key` from `GRADIENT_API_KEY` + - `inference_key` from `GRADIENT_INFERENCE_KEY` + - `agent_key` from `GRADIENT_AGENT_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENT_API_KEY") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENTAI_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_KEY") self.agent_key = agent_key self.agent_domain = agent_domain diff --git a/tests/test_client.py b/tests/test_client.py index e85df56b..137b4325 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -403,7 +403,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENT_API_KEY": Omit()}): client2 = Gradient( base_url=base_url, api_key=None, @@ -1405,7 +1405,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENT_API_KEY": Omit()}): client2 = AsyncGradient( base_url=base_url, api_key=None, From 58c9b2339391a27770ecfe12693385645e7bed4e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 07:08:13 +0000 Subject: [PATCH 142/200] feat(client): support file upload requests --- src/gradient/_base_client.py | 5 ++++- src/gradient/_files.py | 8 ++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/gradient/_base_client.py b/src/gradient/_base_client.py index 5ea17c81..74f3c57a 100644 --- a/src/gradient/_base_client.py +++ b/src/gradient/_base_client.py @@ -532,7 +532,10 @@ def _build_request( is_body_allowed = options.method.lower() != "get" if is_body_allowed: - kwargs["json"] = json_data if is_given(json_data) else None + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None kwargs["files"] = files else: headers.pop("Content-Type", None) diff --git a/src/gradient/_files.py b/src/gradient/_files.py index 715cc207..cc14c14f 100644 --- a/src/gradient/_files.py +++ b/src/gradient/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await 
_async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() From 4483603649312204e97cb742d2d3de5d79b9f6df Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 18:43:30 +0000 Subject: [PATCH 143/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bf7fe4fa..ae66b92e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-beta.3" + ".": "3.0.0-beta.1" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3aaa2fdc..067ebfe7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "0.1.0-beta.3" +version = "3.0.0-beta.1" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 4a62d21d..f0d3c820 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "0.1.0-beta.3" # x-release-please-version +__version__ = "3.0.0-beta.1" # x-release-please-version From 59e39026442e6a7700ea2937247bb2cc68e48811 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 16:19:09 +0000 Subject: [PATCH 144/200] feat(api): collected updates 8/4 --- .stats.yml | 6 +- src/gradient/_client.py | 4 +- src/gradient/resources/agents/agents.py | 4 ++ src/gradient/types/agent_update_params.py | 2 + src/gradient/types/api_agent.py | 6 ++ src/gradient/types/api_model.py | 5 +- .../types/knowledge_base_create_params.py | 16 ++++- .../types/knowledge_bases/api_indexing_job.py | 9 +++ .../api_knowledge_base_data_source.py | 9 ++- .../evaluation_metrics/test_workspaces.py | 20 +++--- .../workspaces/test_agents.py | 4 +- .../agents/test_evaluation_datasets.py | 20 +++--- .../agents/test_evaluation_test_cases.py | 48 ++++++------- .../knowledge_bases/test_data_sources.py | 36 +++++----- tests/api_resources/test_agents.py | 2 + tests/api_resources/test_knowledge_bases.py | 68 +++++++++++-------- 16 files changed, 157 insertions(+), 102 deletions(-) diff --git a/.stats.yml b/.stats.yml index 8c069c9b..9dda6e1d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml -openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859 -config_hash: 77ddef130940a6ad8ea6c6f66aee8757 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml +openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 +config_hash: de89a9c8fde0120577d2aca8be4ae027 
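A note on the file-upload support added in patch 142 above: `_build_request` now special-cases `bytes` bodies, sending them verbatim as the request content instead of JSON-encoding them, and the now-public `read_file_content` helper is what turns path-like inputs into those bytes. A minimal sketch of that conversion path, assuming only the standard library (the file path is illustrative):

import os
import pathlib


def read_file_content(file):
    # Mirrors the patched helper: PathLike inputs are read from disk as
    # bytes; bytes and file-like inputs pass through unchanged.
    if isinstance(file, os.PathLike):
        return pathlib.Path(file).read_bytes()
    return file


payload = read_file_content(pathlib.Path("README.md"))
# In _build_request, a bytes payload now populates kwargs["content"];
# any other JSON data still goes through kwargs["json"].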
diff --git a/src/gradient/_client.py b/src/gradient/_client.py index dc808d5b..21a227d8 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -112,7 +112,7 @@ def __init__( base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None if base_url is None: - base_url = f"https://api.digitalocean.com/" + base_url = f"https://api.digitalocean.com" super().__init__( version=__version__, @@ -367,7 +367,7 @@ def __init__( base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None if base_url is None: - base_url = f"https://api.digitalocean.com/" + base_url = f"https://api.digitalocean.com" super().__init__( version=__version__, diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py index cff147c9..67f7f4ae 100644 --- a/src/gradient/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -300,6 +300,7 @@ def update( self, path_uuid: str, *, + agent_log_insights_enabled: bool | NotGiven = NOT_GIVEN, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, @@ -388,6 +389,7 @@ def update( else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}", body=maybe_transform( { + "agent_log_insights_enabled": agent_log_insights_enabled, "anthropic_key_uuid": anthropic_key_uuid, "conversation_logs_enabled": conversation_logs_enabled, "description": description, @@ -741,6 +743,7 @@ async def update( self, path_uuid: str, *, + agent_log_insights_enabled: bool | NotGiven = NOT_GIVEN, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, @@ -829,6 +832,7 @@ async def update( else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}", body=await async_maybe_transform( { + "agent_log_insights_enabled": agent_log_insights_enabled, "anthropic_key_uuid": anthropic_key_uuid, "conversation_logs_enabled": conversation_logs_enabled, "description": description, diff --git a/src/gradient/types/agent_update_params.py b/src/gradient/types/agent_update_params.py index 5d2b5597..c26bf833 100644 --- a/src/gradient/types/agent_update_params.py +++ b/src/gradient/types/agent_update_params.py @@ -12,6 +12,8 @@ class AgentUpdateParams(TypedDict, total=False): + agent_log_insights_enabled: bool + anthropic_key_uuid: str """Optional anthropic key uuid for use with anthropic models""" diff --git a/src/gradient/types/api_agent.py b/src/gradient/types/api_agent.py index 4be22aa5..abfbe828 100644 --- a/src/gradient/types/api_agent.py +++ b/src/gradient/types/api_agent.py @@ -173,6 +173,12 @@ class LoggingConfig(BaseModel): galileo_project_name: Optional[str] = None """Name of the Galileo project""" + insights_enabled: Optional[bool] = None + """Whether insights are enabled""" + + insights_enabled_at: Optional[datetime] = None + """Timestamp when insights were enabled""" + log_stream_id: Optional[str] = None """Identifier for the log stream""" diff --git a/src/gradient/types/api_model.py b/src/gradient/types/api_model.py index 7c530ee2..e7f99bc1 100644 --- a/src/gradient/types/api_model.py +++ b/src/gradient/types/api_model.py @@ -11,6 +11,9 @@ class APIModel(BaseModel): + id: Optional[str] = None + """Human-readable model identifier""" + agreement: Optional[APIAgreement] = None """Agreement Description""" @@ -21,7 +24,7 @@ class APIModel(BaseModel): """True if it is a foundational model provided by 
do""" name: Optional[str] = None - """Name of the model""" + """Display name of the model""" parent_uuid: Optional[str] = None """Unique id of the model, this model is based on""" diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py index 9ecd777d..5c0df9a6 100644 --- a/src/gradient/types/knowledge_base_create_params.py +++ b/src/gradient/types/knowledge_base_create_params.py @@ -10,7 +10,7 @@ from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceDropboxDataSource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -51,6 +51,17 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): """The VPC to deploy the knowledge base database in""" +class DatasourceDropboxDataSource(TypedDict, total=False): + folder: str + + refresh_token: str + """Refresh token. + + you can obrain a refresh token by following the oauth2 flow. see + /v2/gen-ai/oauth2/dropbox/tokens for reference. + """ + + class Datasource(TypedDict, total=False): aws_data_source: AwsDataSourceParam """AWS S3 Data Source""" @@ -61,6 +72,9 @@ class Datasource(TypedDict, total=False): bucket_region: str """Deprecated, moved to data_source_details""" + dropbox_data_source: DatasourceDropboxDataSource + """Dropbox Data Source""" + file_upload_data_source: APIFileUploadDataSourceParam """File to upload as data source for knowledge base.""" diff --git a/src/gradient/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py index 240fd709..312e465c 100644 --- a/src/gradient/types/knowledge_bases/api_indexing_job.py +++ b/src/gradient/types/knowledge_bases/api_indexing_job.py @@ -55,6 +55,15 @@ class APIIndexingJob(BaseModel): total_datasources: Optional[int] = None """Number of datasources being indexed""" + total_items_failed: Optional[str] = None + """Total Items Failed""" + + total_items_indexed: Optional[str] = None + """Total Items Indexed""" + + total_items_skipped: Optional[str] = None + """Total Items Skipped""" + updated_at: Optional[datetime] = None """Last modified""" diff --git a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py index a4d695d2..ed370eb5 100644 --- a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py @@ -10,7 +10,7 @@ from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource -__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource"] +__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource", "DropboxDataSource"] class AwsDataSource(BaseModel): @@ -23,6 +23,10 @@ class AwsDataSource(BaseModel): """Region of bucket""" +class DropboxDataSource(BaseModel): + folder: Optional[str] = None + + class APIKnowledgeBaseDataSource(BaseModel): aws_data_source: Optional[AwsDataSource] = None """AWS S3 Data Source for Display""" @@ -33,6 +37,9 @@ class APIKnowledgeBaseDataSource(BaseModel): created_at: Optional[datetime] = None """Creation date / time""" + dropbox_data_source: Optional[DropboxDataSource] = None + """Dropbox Data Source for Display""" + file_upload_data_source: Optional[APIFileUploadDataSource] = None 
"""File to upload as data source for knowledge base.""" diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index 608406bf..f326c1e3 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -35,8 +35,8 @@ def test_method_create(self, client: Gradient) -> None: def test_method_create_with_all_params(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( agent_uuids=["example string"], - description='"example string"', - name='"example name"', + description="example string", + name="example name", ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -117,9 +117,9 @@ def test_method_update(self, client: Gradient) -> None: def test_method_update_with_all_params(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - name='"example name"', - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description="example string", + name="example name", + body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -286,8 +286,8 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create( agent_uuids=["example string"], - description='"example string"', - name='"example name"', + description="example string", + name="example name", ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -368,9 +368,9 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - name='"example name"', - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description="example string", + name="example name", + body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index b70f9c58..2d63ff65 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -87,7 +87,7 @@ def test_method_move_with_all_params(self, client: Gradient) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuids=["example string"], - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -198,7 +198,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradient) -> agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', 
agent_uuids=["example string"], - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index f60c4720..3978ebdd 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -31,11 +31,11 @@ def test_method_create(self, client: Gradient) -> None: def test_method_create_with_all_params(self, client: Gradient) -> None: evaluation_dataset = client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "example name", + "size_in_bytes": "12345", + "stored_object_key": "example string", }, - name='"example name"', + name="example name", ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -75,7 +75,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": '"example name"', + "file_name": "example name", "file_size": "file_size", } ], @@ -127,11 +127,11 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "example name", + "size_in_bytes": "12345", + "stored_object_key": "example string", }, - name='"example name"', + name="example name", ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -171,7 +171,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params(self, as evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": '"example name"', + "file_name": "example name", "file_size": "file_size", } ], diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index 2860aa2c..b1d92580 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -33,17 +33,17 @@ def test_method_create(self, client: Gradient) -> None: @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.create( - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', + dataset_uuid="123e4567-e89b-12d3-a456-426614174000", + description="example string", metrics=["example string"], - name='"example name"', + name="example name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', + "metric_uuid": "123e4567-e89b-12d3-a456-426614174000", + "name": "example name", "success_threshold": 123, "success_threshold_pct": 123, }, - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(EvaluationTestCaseCreateResponse, 
evaluation_test_case, path=["response"]) @@ -133,17 +133,17 @@ def test_method_update(self, client: Gradient) -> None: def test_method_update_with_all_params(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', + dataset_uuid="123e4567-e89b-12d3-a456-426614174000", + description="example string", metrics={"metric_uuids": ["example string"]}, - name='"example name"', + name="example name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', + "metric_uuid": "123e4567-e89b-12d3-a456-426614174000", + "name": "example name", "success_threshold": 123, "success_threshold_pct": 123, }, - body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_test_case_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -278,17 +278,17 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create( - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', + dataset_uuid="123e4567-e89b-12d3-a456-426614174000", + description="example string", metrics=["example string"], - name='"example name"', + name="example name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', + "metric_uuid": "123e4567-e89b-12d3-a456-426614174000", + "name": "example name", "success_threshold": 123, "success_threshold_pct": 123, }, - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -378,17 +378,17 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', + dataset_uuid="123e4567-e89b-12d3-a456-426614174000", + description="example string", metrics={"metric_uuids": ["example string"]}, - name='"example name"', + name="example name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', + "metric_uuid": "123e4567-e89b-12d3-a456-426614174000", + "name": "example name", "success_threshold": 123, "success_threshold_pct": 123, }, - body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_test_case_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index a5734cea..0e44b584 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -35,20 +35,20 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.create( 
path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "key_id": "123e4567-e89b-12d3-a456-426614174000", + "region": "example string", + "secret_key": "example string", }, body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "region": "example string", }, web_crawler_data_source={ - "base_url": '"example string"', + "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -215,20 +215,20 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) data_source = await async_client.knowledge_bases.data_sources.create( path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "key_id": "123e4567-e89b-12d3-a456-426614174000", + "region": "example string", + "secret_key": "example string", }, body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "region": "example string", }, web_crawler_data_source={ - "base_url": '"example string"', + "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, }, diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 987f2eda..77825f7e 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -124,6 +124,7 @@ def test_method_update(self, client: Gradient) -> None: def test_method_update_with_all_params(self, client: Gradient) -> None: agent = client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_log_insights_enabled=True, anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', conversation_logs_enabled=True, description='"My Agent Description"', @@ -415,6 +416,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_log_insights_enabled=True, anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', conversation_logs_enabled=True, description='"My Agent Description"', diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 1628fdbe..23945480 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -37,27 +37,31 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: datasources=[ { "aws_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example 
string"', + "bucket_name": "example name", + "item_path": "example string", + "key_id": "123e4567-e89b-12d3-a456-426614174000", + "region": "example string", + "secret_key": "example string", + }, + "bucket_name": "example name", + "bucket_region": "example string", + "dropbox_data_source": { + "folder": "example string", + "refresh_token": "example string", }, - "bucket_name": '"example name"', - "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "example name", + "size_in_bytes": "12345", + "stored_object_key": "example string", }, - "item_path": '"example string"', + "item_path": "example string", "spaces_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "region": "example string", }, "web_crawler_data_source": { - "base_url": '"example string"', + "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -291,27 +295,31 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) datasources=[ { "aws_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "key_id": "123e4567-e89b-12d3-a456-426614174000", + "region": "example string", + "secret_key": "example string", + }, + "bucket_name": "example name", + "bucket_region": "example string", + "dropbox_data_source": { + "folder": "example string", + "refresh_token": "example string", }, - "bucket_name": '"example name"', - "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "example name", + "size_in_bytes": "12345", + "stored_object_key": "example string", }, - "item_path": '"example string"', + "item_path": "example string", "spaces_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "example name", + "item_path": "example string", + "region": "example string", }, "web_crawler_data_source": { - "base_url": '"example string"', + "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, }, From d28774971b6946d6f400e62fd2787fc4d2b6023b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 16:21:43 +0000 Subject: [PATCH 145/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ae66b92e..d25059a8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.1" + ".": "3.0.0-beta.2" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 067ebfe7..62c4d5f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.1" +version = "3.0.0-beta.2" description = "The official Python library for the Gradient API" dynamic = ["readme"] 
license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index f0d3c820..5bf5b229 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.0.0-beta.1" # x-release-please-version +__version__ = "3.0.0-beta.2" # x-release-please-version From 03c01229832c4755fc6c8fd1790d02f0fa8bed17 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 08:50:08 +0000 Subject: [PATCH 146/200] chore(internal): fix ruff target version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 62c4d5f2..096dd5ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,7 +159,7 @@ reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" -target-version = "py37" +target-version = "py38" [tool.ruff.format] docstring-code-format = true From eb1dcf77685317445031ac1fb78d3a3ecde49709 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 18:44:42 +0000 Subject: [PATCH 147/200] feat(api): rename environment variables To match the ui and DO ecosystem --- .stats.yml | 2 +- README.md | 6 +++--- src/gradient/_client.py | 24 ++++++++++++------------ tests/test_client.py | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9dda6e1d..b57aebbe 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: de89a9c8fde0120577d2aca8be4ae027 +config_hash: 136e1973eb6297e6308a165594bd00a3 diff --git a/README.md b/README.md index 60a95711..82a71393 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ import os from gradient import Gradient client = Gradient( - api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted ) completion = client.chat.completions.create( @@ -46,7 +46,7 @@ print(completion.choices) While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `GRADIENT_API_KEY="My API Key"` to your `.env` file +to add `DIGITALOCEAN_ACCESS_TOKEN="My API Key"` to your `.env` file so that your API Key is not stored in source control. ## Async usage @@ -59,7 +59,7 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted ) diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 21a227d8..d92102f4 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -90,20 +90,20 @@ def __init__( """Construct a new synchronous Gradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENT_API_KEY` - - `inference_key` from `GRADIENT_INFERENCE_KEY` - - `agent_key` from `GRADIENT_AGENT_KEY` + - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` + - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_key = agent_key self.agent_domain = agent_domain @@ -345,20 +345,20 @@ def __init__( """Construct a new async AsyncGradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENT_API_KEY` - - `inference_key` from `GRADIENT_INFERENCE_KEY` - - `agent_key` from `GRADIENT_AGENT_KEY` + - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` + - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_key = agent_key self.agent_domain = agent_domain diff --git a/tests/test_client.py b/tests/test_client.py index 137b4325..6760c226 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -403,7 +403,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENT_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = Gradient( base_url=base_url, api_key=None, @@ -1405,7 +1405,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENT_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = AsyncGradient( base_url=base_url, api_key=None, From 1803c8d31ac72a8c40b54f4e24f25284017df8f5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 19:42:50 +0000 Subject: [PATCH 148/200] feat(api): make kwargs match the env vars --- .stats.yml | 2 +- README.md | 16 +- src/gradient/_client.py | 148 ++++++++-------- tests/conftest.py | 18 +- tests/test_client.py | 372 ++++++++++++++++++++-------------------- 5 files changed, 289 insertions(+), 267 deletions(-) diff --git a/.stats.yml b/.stats.yml index b57aebbe..7b81dd11 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 136e1973eb6297e6308a165594bd00a3 +config_hash: 99e3cd5dde0beb796f4547410869f726 diff --git a/README.md b/README.md index 82a71393..8ae17135 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,9 @@ import os from gradient import Gradient client = Gradient( - api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted + access_token=os.environ.get( + "DIGITALOCEAN_ACCESS_TOKEN" + ), # This is the default and can be omitted ) completion = client.chat.completions.create( @@ -44,10 +46,10 @@ completion = client.chat.completions.create( print(completion.choices) ``` -While you can provide an `api_key` keyword argument, +While you can provide a `access_token` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `DIGITALOCEAN_ACCESS_TOKEN="My API Key"` to your `.env` file -so that your API Key is not stored in source control. +to add `DIGITALOCEAN_ACCESS_TOKEN="My Access Token"` to your `.env` file +so that your Access Token is not stored in source control. ## Async usage @@ -59,7 +61,9 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted + access_token=os.environ.get( + "DIGITALOCEAN_ACCESS_TOKEN" + ), # This is the default and can be omitted ) @@ -102,7 +106,7 @@ from gradient import AsyncGradient async def main() -> None: async with AsyncGradient( - api_key="My API Key", + access_token="My Access Token", http_client=DefaultAioHttpClient(), ) as client: completion = await client.chat.completions.create( diff --git a/src/gradient/_client.py b/src/gradient/_client.py index d92102f4..c745eeec 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -56,18 +56,20 @@ class Gradient(SyncAPIClient): # client options - api_key: str | None - inference_key: str | None - agent_key: str | None - agent_domain: str | None + access_token: str | None + model_access_key: str | None + agent_access_key: str | None + agent_endpoint: str | None + inference_endpoint: str | None def __init__( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, - agent_domain: str | None = None, + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, + agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -90,23 +92,25 @@ def __init__( """Construct a new synchronous Gradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` - - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` - - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` + - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` """ - if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") - self.api_key = api_key + if access_token is None: + access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + self.access_token = access_token - if inference_key is None: - inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") - self.inference_key = inference_key + if model_access_key is None: + model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + self.model_access_key = model_access_key - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") - self.agent_key = agent_key + if agent_access_key is None: + agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + self.agent_access_key = agent_access_key - self.agent_domain = agent_domain + self.agent_endpoint = agent_endpoint + + self.inference_endpoint = inference_endpoint if base_url is None: base_url = os.environ.get("GRADIENT_BASE_URL") @@ -191,10 +195,10 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - api_key = self.api_key - if api_key is None: + access_token = self.access_token + if access_token is None: return {} - return {"Authorization": f"Bearer {api_key}"} + return {"Authorization": f"Bearer {access_token}"} @property @override @@ -207,22 +211,23 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if self.api_key and headers.get("Authorization"): + if self.access_token and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return raise TypeError( - '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected the access_token to be set. 
Or for the `Authorization` headers to be explicitly omitted"' ) def copy( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, - agent_domain: str | None = None, + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, + agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -256,10 +261,11 @@ def copy( http_client = http_client or self._client client = self.__class__( - api_key=api_key or self.api_key, - inference_key=inference_key or self.inference_key, - agent_key=agent_key or self.agent_key, - agent_domain=agent_domain or self.agent_domain, + access_token=access_token or self.access_token, + model_access_key=model_access_key or self.model_access_key, + agent_access_key=agent_access_key or self.agent_access_key, + agent_endpoint=agent_endpoint or self.agent_endpoint, + inference_endpoint=inference_endpoint or self.inference_endpoint, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -311,18 +317,20 @@ def _make_status_error( class AsyncGradient(AsyncAPIClient): # client options - api_key: str | None - inference_key: str | None - agent_key: str | None - agent_domain: str | None + access_token: str | None + model_access_key: str | None + agent_access_key: str | None + agent_endpoint: str | None + inference_endpoint: str | None def __init__( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, - agent_domain: str | None = None, + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, + agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -345,23 +353,25 @@ def __init__( """Construct a new async AsyncGradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` - - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` - - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` + - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` """ - if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") - self.api_key = api_key + if access_token is None: + access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + self.access_token = access_token + + if model_access_key is None: + model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + self.model_access_key = model_access_key - if inference_key is None: - inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") - self.inference_key = inference_key + if agent_access_key is None: + agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + self.agent_access_key = agent_access_key - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") - self.agent_key = agent_key + self.agent_endpoint = agent_endpoint - self.agent_domain = agent_domain + self.inference_endpoint = inference_endpoint if base_url is None: base_url = os.environ.get("GRADIENT_BASE_URL") @@ -446,10 +456,10 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - api_key = self.api_key - if api_key is None: + access_token = self.access_token + if access_token is None: return {} - return {"Authorization": f"Bearer {api_key}"} + return {"Authorization": f"Bearer {access_token}"} @property @override @@ -462,22 +472,23 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if self.api_key and headers.get("Authorization"): + if self.access_token and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return raise TypeError( - '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected the access_token to be set. 
Or for the `Authorization` headers to be explicitly omitted"' ) def copy( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, - agent_domain: str | None = None, + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, + agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -511,10 +522,11 @@ def copy( http_client = http_client or self._client client = self.__class__( - api_key=api_key or self.api_key, - inference_key=inference_key or self.inference_key, - agent_key=agent_key or self.agent_key, - agent_domain=agent_domain or self.agent_domain, + access_token=access_token or self.access_token, + model_access_key=model_access_key or self.model_access_key, + agent_access_key=agent_access_key or self.agent_access_key, + agent_endpoint=agent_endpoint or self.agent_endpoint, + inference_endpoint=inference_endpoint or self.inference_endpoint, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, diff --git a/tests/conftest.py b/tests/conftest.py index 87962cc1..bc2aa92e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -45,9 +45,9 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -api_key = "My API Key" -inference_key = "My Inference Key" -agent_key = "My Agent Key" +access_token = "My Access Token" +model_access_key = "My Model Access Key" +agent_access_key = "My Agent Access Key" @pytest.fixture(scope="session") @@ -58,9 +58,9 @@ def client(request: FixtureRequest) -> Iterator[Gradient]: with Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=strict, ) as client: yield client @@ -88,9 +88,9 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradient]: async with AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=strict, http_client=http_client, ) as client: diff --git a/tests/test_client.py b/tests/test_client.py index 6760c226..e21ee831 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -38,9 +38,9 @@ from .utils import update_env base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -api_key = "My API Key" -inference_key = "My Inference Key" -agent_key = "My Agent Key" +access_token = "My Access Token" +model_access_key = "My Model Access Key" +agent_access_key = "My Agent Access Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: @@ -64,9 +64,9 @@ def _get_open_connections(client: Gradient | AsyncGradient) -> int: class TestGradient: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -94,17 +94,17 @@ def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) - copied = 
self.client.copy(api_key="another My API Key") - assert copied.api_key == "another My API Key" - assert self.client.api_key == "My API Key" + copied = self.client.copy(access_token="another My Access Token") + assert copied.access_token == "another My Access Token" + assert self.client.access_token == "My Access Token" - copied = self.client.copy(inference_key="another My Inference Key") - assert copied.inference_key == "another My Inference Key" - assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(model_access_key="another My Model Access Key") + assert copied.model_access_key == "another My Model Access Key" + assert self.client.model_access_key == "My Model Access Key" - copied = self.client.copy(agent_key="another My Agent Key") - assert copied.agent_key == "another My Agent Key" - assert self.client.agent_key == "My Agent Key" + copied = self.client.copy(agent_access_key="another My Agent Access Key") + assert copied.agent_access_key == "another My Agent Access Key" + assert self.client.agent_access_key == "My Agent Access Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly @@ -125,9 +125,9 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -164,9 +164,9 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -295,9 +295,9 @@ def test_request_timeout(self) -> None: def test_client_timeout_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -311,9 +311,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client(timeout=None) as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -326,9 +326,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client() as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -341,9 +341,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -357,9 +357,9 @@ 
async def test_invalid_http_client(self) -> None: async with httpx.AsyncClient() as http_client: Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -367,9 +367,9 @@ async def test_invalid_http_client(self) -> None: def test_default_headers_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -379,9 +379,9 @@ def test_default_headers_option(self) -> None: client2 = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -395,26 +395,26 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" + assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = Gradient( base_url=base_url, - api_key=None, - inference_key=inference_key, - agent_key=agent_key, + access_token=None, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with pytest.raises( TypeError, - match="Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected the access_token to be set. 
Or for the `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -426,9 +426,9 @@ def test_validate_headers(self) -> None: def test_default_query_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -632,9 +632,9 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = Gradient( base_url="https://example.com/from_init", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -646,7 +646,10 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = Gradient( - api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, + _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -655,16 +658,16 @@ def test_base_url_env(self) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -686,16 +689,16 @@ def test_base_url_trailing_slash(self, client: Gradient) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -717,16 +720,16 @@ def test_base_url_no_trailing_slash(self, client: Gradient) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -746,9 +749,9 @@ def test_absolute_request_url(self, client: Gradient) -> None: def test_copied_client_does_not_close_http(self) -> None: client = Gradient( 
base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert not client.is_closed() @@ -763,9 +766,9 @@ def test_copied_client_does_not_close_http(self) -> None: def test_client_context_manager(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with client as c2: @@ -790,9 +793,9 @@ def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -817,9 +820,9 @@ class Model(BaseModel): strict_client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -828,9 +831,9 @@ class Model(BaseModel): client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=False, ) @@ -862,9 +865,9 @@ class Model(BaseModel): def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1064,9 +1067,9 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: class TestAsyncGradient: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1096,17 +1099,17 @@ def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) - copied = self.client.copy(api_key="another My API Key") - assert copied.api_key == "another My API Key" - assert self.client.api_key == "My API Key" + copied = self.client.copy(access_token="another My Access Token") + assert copied.access_token == "another My Access Token" + assert self.client.access_token == "My Access Token" - copied = self.client.copy(inference_key="another My Inference Key") - assert copied.inference_key == "another My Inference Key" - assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(model_access_key="another My Model Access Key") + assert copied.model_access_key == "another My Model Access Key" + assert self.client.model_access_key == "My Model Access Key" - copied = self.client.copy(agent_key="another My Agent Key") - assert copied.agent_key == "another My Agent Key" - assert self.client.agent_key == "My Agent Key" + copied = self.client.copy(agent_access_key="another My Agent Access Key") + assert copied.agent_access_key 
== "another My Agent Access Key" + assert self.client.agent_access_key == "My Agent Access Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly @@ -1127,9 +1130,9 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1166,9 +1169,9 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -1297,9 +1300,9 @@ async def test_request_timeout(self) -> None: async def test_client_timeout_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -1313,9 +1316,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient(timeout=None) as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1328,9 +1331,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient() as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1343,9 +1346,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1359,9 +1362,9 @@ def test_invalid_http_client(self) -> None: with httpx.Client() as http_client: AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -1369,9 +1372,9 @@ def test_invalid_http_client(self) -> None: def test_default_headers_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1381,9 +1384,9 @@ def test_default_headers_option(self) -> None: client2 = AsyncGradient( base_url=base_url, - 
api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -1397,26 +1400,26 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" + assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = AsyncGradient( base_url=base_url, - api_key=None, - inference_key=inference_key, - agent_key=agent_key, + access_token=None, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with pytest.raises( TypeError, - match="Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected the access_token to be set. Or for the `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1428,9 +1431,9 @@ def test_validate_headers(self) -> None: def test_default_query_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -1634,9 +1637,9 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = AsyncGradient( base_url="https://example.com/from_init", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -1648,7 +1651,10 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = AsyncGradient( - api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, + _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -1657,16 +1663,16 @@ def test_base_url_env(self) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1688,16 +1694,16 @@ def 
test_base_url_trailing_slash(self, client: AsyncGradient) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1719,16 +1725,16 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1748,9 +1754,9 @@ def test_absolute_request_url(self, client: AsyncGradient) -> None: async def test_copied_client_does_not_close_http(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert not client.is_closed() @@ -1766,9 +1772,9 @@ async def test_copied_client_does_not_close_http(self) -> None: async def test_client_context_manager(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) async with client as c2: @@ -1794,9 +1800,9 @@ async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -1823,9 +1829,9 @@ class Model(BaseModel): strict_client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1834,9 +1840,9 @@ class Model(BaseModel): client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=False, ) @@ -1869,9 +1875,9 @@ class Model(BaseModel): async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + 
model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) From 5d696fcdd3810f9780dfc3c3209676bb8d79add9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:35:40 +0000 Subject: [PATCH 149/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d25059a8..5e212f31 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.2" + ".": "3.0.0-beta.3" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 096dd5ed..e069fc01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.2" +version = "3.0.0-beta.3" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 5bf5b229..483c7ac9 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.0.0-beta.2" # x-release-please-version +__version__ = "3.0.0-beta.3" # x-release-please-version From b14e916f053d6329f29254c63611fd7c9e22a3e4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 9 Aug 2025 05:13:51 +0000 Subject: [PATCH 150/200] chore: update @stainless-api/prism-cli to v5.15.0 --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index d2814ae6..0b28f6ea 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" fi From a82da1233e6b7cf3dbfa327bf214dede72328008 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 9 Aug 2025 05:20:15 +0000 Subject: [PATCH 151/200] chore(internal): update comment in script --- scripts/test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test b/scripts/test index 2b878456..dbeda2d2 100755 --- a/scripts/test +++ b/scripts/test @@ -43,7 +43,7 @@ elif ! 
prism_is_running ; then echo -e "To run the server, pass in the path or url of your OpenAPI" echo -e "spec to the prism command:" echo - echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" echo exit 1 From 8e917cfeac6e0fa5d96f602f34fc53698c226538 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 02:21:33 +0000 Subject: [PATCH 152/200] chore(internal): codegen related update --- .../agents/chat/test_completions.py | 32 +-- .../evaluation_metrics/anthropic/test_keys.py | 104 ++++----- .../evaluation_metrics/openai/test_keys.py | 104 ++++----- .../agents/evaluation_metrics/test_models.py | 16 +- .../evaluation_metrics/test_workspaces.py | 96 ++++----- .../workspaces/test_agents.py | 40 ++-- tests/api_resources/agents/test_api_keys.py | 92 ++++---- .../agents/test_evaluation_datasets.py | 32 +-- .../agents/test_evaluation_metrics.py | 28 +-- .../agents/test_evaluation_runs.py | 68 +++--- .../agents/test_evaluation_test_cases.py | 88 ++++---- tests/api_resources/agents/test_functions.py | 56 ++--- .../agents/test_knowledge_bases.py | 48 ++--- tests/api_resources/agents/test_routes.py | 72 +++---- tests/api_resources/agents/test_versions.py | 40 ++-- tests/api_resources/chat/test_completions.py | 32 +-- .../databases/schema_registry/test_config.py | 64 +++--- .../gpu_droplets/account/test_keys.py | 68 +++--- .../gpu_droplets/firewalls/test_droplets.py | 32 +-- .../gpu_droplets/firewalls/test_rules.py | 40 ++-- .../gpu_droplets/firewalls/test_tags.py | 32 +-- .../gpu_droplets/floating_ips/test_actions.py | 64 +++--- .../gpu_droplets/images/test_actions.py | 48 ++--- .../load_balancers/test_droplets.py | 32 +-- .../load_balancers/test_forwarding_rules.py | 32 +-- .../gpu_droplets/test_actions.py | 200 +++++++++--------- .../gpu_droplets/test_autoscale.py | 140 ++++++------ .../gpu_droplets/test_backups.py | 56 ++--- .../test_destroy_with_associated_resources.py | 64 +++--- .../gpu_droplets/test_firewalls.py | 84 ++++---- .../gpu_droplets/test_floating_ips.py | 76 +++---- .../api_resources/gpu_droplets/test_images.py | 72 +++---- .../gpu_droplets/test_load_balancers.py | 136 ++++++------ .../api_resources/gpu_droplets/test_sizes.py | 16 +- .../gpu_droplets/test_snapshots.py | 40 ++-- .../gpu_droplets/test_volumes.py | 96 ++++----- .../gpu_droplets/volumes/test_actions.py | 132 ++++++------ .../gpu_droplets/volumes/test_snapshots.py | 72 +++---- .../api_resources/inference/test_api_keys.py | 84 ++++---- .../knowledge_bases/test_data_sources.py | 56 ++--- .../knowledge_bases/test_indexing_jobs.py | 84 ++++---- .../models/providers/test_anthropic.py | 104 ++++----- .../models/providers/test_openai.py | 104 ++++----- tests/api_resources/test_agents.py | 104 ++++----- tests/api_resources/test_gpu_droplets.py | 144 ++++++------- tests/api_resources/test_knowledge_bases.py | 84 ++++---- tests/api_resources/test_models.py | 16 +- tests/api_resources/test_regions.py | 16 +- 48 files changed, 1670 insertions(+), 1670 deletions(-) diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 695a374b..2824ed3d 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -17,7 +17,7 @@ class TestCompletions: parametrize = 
pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: completion = client.agents.chat.completions.create( @@ -31,7 +31,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: completion = client.agents.chat.completions.create( @@ -71,7 +71,7 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non ) assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.agents.chat.completions.with_raw_response.create( @@ -89,7 +89,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: completion = response.parse() assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.agents.chat.completions.with_streaming_response.create( @@ -109,7 +109,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_2(self, client: Gradient) -> None: completion_stream = client.agents.chat.completions.create( @@ -124,7 +124,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None: ) completion_stream.response.close() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: completion_stream = client.agents.chat.completions.create( @@ -164,7 +164,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non ) completion_stream.response.close() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.agents.chat.completions.with_raw_response.create( @@ -182,7 +182,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: stream = response.parse() stream.close() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.agents.chat.completions.with_streaming_response.create( @@ -209,7 +209,7 @@ class TestAsyncCompletions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: completion = await async_client.agents.chat.completions.create( @@ -223,7 +223,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(CompletionCreateResponse, completion, 
path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: completion = await async_client.agents.chat.completions.create( @@ -263,7 +263,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ) assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( @@ -281,7 +281,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) completion = await response.parse() assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( @@ -301,7 +301,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.agents.chat.completions.create( @@ -316,7 +316,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) await completion_stream.response.aclose() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: completion_stream = await async_client.agents.chat.completions.create( @@ -356,7 +356,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ) await completion_stream.response.aclose() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( @@ -374,7 +374,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) stream = await response.parse() await stream.close() - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py index a8ca5724..b6b461e6 100644 --- a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py +++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py @@ -24,13 +24,13 @@ class TestKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, 
path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.create( @@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() @@ -49,7 +49,7 @@ def test_raw_response_create(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: @@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.retrieve( @@ -69,7 +69,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( @@ -95,7 +95,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -103,7 +103,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.update( @@ -111,7 +111,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.update( @@ -122,7 +122,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) 
-> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( @@ -134,7 +134,7 @@ def test_raw_response_update(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( @@ -148,7 +148,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -156,13 +156,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list( @@ -171,7 +171,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() @@ -181,7 +181,7 @@ def test_raw_response_list(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: @@ -193,7 +193,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.delete( @@ -201,7 +201,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( @@ -213,7 +213,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( @@ -227,7 +227,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is 
True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -235,7 +235,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list_agents( @@ -243,7 +243,7 @@ def test_method_list_agents(self, client: Gradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.list_agents( @@ -253,7 +253,7 @@ def test_method_list_agents_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( @@ -265,7 +265,7 @@ def test_raw_response_list_agents(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( @@ -279,7 +279,7 @@ def test_streaming_response_list_agents(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -293,13 +293,13 @@ class TestAsyncKeys: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.create( @@ -308,7 +308,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create() @@ -318,7 +318,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response: @@ -330,7 +330,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve( @@ -338,7 +338,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( @@ -350,7 +350,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( @@ -364,7 +364,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -372,7 +372,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.update( @@ -380,7 +380,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.update( @@ -391,7 +391,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update( @@ -403,7 +403,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with 
async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update( @@ -417,7 +417,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -425,13 +425,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list( @@ -440,7 +440,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list() @@ -450,7 +450,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response: @@ -462,7 +462,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.delete( @@ -470,7 +470,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( @@ -482,7 +482,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( @@ -496,7 +496,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are 
disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -504,7 +504,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_agents(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( @@ -512,7 +512,7 @@ async def test_method_list_agents(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents( @@ -522,7 +522,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents( @@ -534,7 +534,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> No key = await response.parse() assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents( @@ -548,7 +548,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradient) assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py index 5a22b1bc..da5cf8e1 100644 --- a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py +++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py @@ -24,13 +24,13 @@ class TestKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.create( @@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.create() @@ -49,7 +49,7 @@ def test_raw_response_create(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: @@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.retrieve( @@ -69,7 +69,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( @@ -95,7 +95,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -103,7 +103,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.update( @@ -111,7 +111,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.update( @@ -122,7 +122,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.update( @@ -134,7 +134,7 @@ def test_raw_response_update(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( @@ -148,7 
+148,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -156,13 +156,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list( @@ -171,7 +171,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list() @@ -181,7 +181,7 @@ def test_raw_response_list(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: @@ -193,7 +193,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.delete( @@ -201,7 +201,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( @@ -213,7 +213,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( @@ -227,7 +227,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -235,7 +235,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents(self, client: Gradient) -> None: key = 
client.agents.evaluation_metrics.openai.keys.list_agents( @@ -243,7 +243,7 @@ def test_method_list_agents(self, client: Gradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents_with_all_params(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.openai.keys.list_agents( @@ -253,7 +253,7 @@ def test_method_list_agents_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( @@ -265,7 +265,7 @@ def test_raw_response_list_agents(self, client: Gradient) -> None: key = response.parse() assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( @@ -279,7 +279,7 @@ def test_streaming_response_list_agents(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -293,13 +293,13 @@ class TestAsyncKeys: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.create() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.create( @@ -308,7 +308,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.create() @@ -318,7 +318,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response: @@ -330,7 +330,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, 
async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.retrieve( @@ -338,7 +338,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve( @@ -350,7 +350,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve( @@ -364,7 +364,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -372,7 +372,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.update( @@ -380,7 +380,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.update( @@ -391,7 +391,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update( @@ -403,7 +403,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.update( @@ -417,7 +417,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -425,13 +425,13 @@ async def test_path_params_update(self, 
async_client: AsyncGradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list( @@ -440,7 +440,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list() @@ -450,7 +450,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyListResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response: @@ -462,7 +462,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.delete( @@ -470,7 +470,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete( @@ -482,7 +482,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyDeleteResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete( @@ -496,7 +496,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -504,7 +504,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_agents(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( @@ -512,7 +512,7 @@ async def 
test_method_list_agents(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.openai.keys.list_agents( @@ -522,7 +522,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi ) assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents( @@ -534,7 +534,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> No key = await response.parse() assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents( @@ -548,7 +548,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradient) assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py index 624e5288..677b3383 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_models.py +++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -17,13 +17,13 @@ class TestModels: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: model = client.agents.evaluation_metrics.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: model = client.agents.evaluation_metrics.models.list( @@ -34,7 +34,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.models.with_raw_response.list() @@ -44,7 +44,7 @@ def test_raw_response_list(self, client: Gradient) -> None: model = response.parse() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_metrics.models.with_streaming_response.list() as response: @@ -62,13 +62,13 @@ class TestAsyncModels: "async_client", [False, True, {"http_client": "aiohttp"}], 
indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: model = await async_client.agents.evaluation_metrics.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: model = await async_client.agents.evaluation_metrics.models.list( @@ -79,7 +79,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.models.with_raw_response.list() @@ -89,7 +89,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: model = await response.parse() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response: diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index f326c1e3..3acede09 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -24,13 +24,13 @@ class TestWorkspaces: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.create() assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.create() @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: Gradient) -> None: workspace = response.parse() assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response: @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: 
Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.retrieve(
@@ -70,7 +70,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
@@ -82,7 +82,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         workspace = response.parse()
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
@@ -96,7 +96,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -104,7 +104,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.update(
@@ -112,7 +112,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.update(
@@ -123,7 +123,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.update(
@@ -135,7 +135,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         workspace = response.parse()
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
@@ -149,7 +149,7 @@ def test_streaming_response_update(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
@@ -157,13 +157,13 @@ def test_path_params_update(self, client: Gradient) -> None:
                 path_workspace_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.list()
         assert_matches_type(WorkspaceListResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.list()
@@ -173,7 +173,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         workspace = response.parse()
         assert_matches_type(WorkspaceListResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
@@ -185,7 +185,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.delete(
@@ -193,7 +193,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
@@ -205,7 +205,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         workspace = response.parse()
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
@@ -219,7 +219,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -227,7 +227,7 @@ def test_path_params_delete(self, client: Gradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_evaluation_test_cases(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
@@ -235,7 +235,7 @@ def test_method_list_evaluation_test_cases(self, client: Gradient) -> None:
         )
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
@@ -247,7 +247,7 @@ def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None
         workspace = response.parse()
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list_evaluation_test_cases(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
@@ -261,7 +261,7 @@ def test_streaming_response_list_evaluation_test_cases(self, client: Gradient) -

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_list_evaluation_test_cases(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -275,13 +275,13 @@ class TestAsyncWorkspaces:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.create()
         assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.create(
@@ -291,7 +291,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.create()
@@ -301,7 +301,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         workspace = await response.parse()
         assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response:
@@ -313,7 +313,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve(
@@ -321,7 +321,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
@@ -333,7 +333,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         workspace = await response.parse()
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
@@ -347,7 +347,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -355,7 +355,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.update(
@@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.update(
@@ -374,7 +374,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update(
@@ -386,7 +386,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         workspace = await response.parse()
         assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
@@ -400,7 +400,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
@@ -408,13 +408,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
                 path_workspace_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.list()
         assert_matches_type(WorkspaceListResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list()
@@ -424,7 +424,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         workspace = await response.parse()
         assert_matches_type(WorkspaceListResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
@@ -436,7 +436,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.delete(
@@ -444,7 +444,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
@@ -456,7 +456,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         workspace = await response.parse()
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
@@ -470,7 +470,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -478,7 +478,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
@@ -486,7 +486,7 @@ async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradie
         )
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
@@ -498,7 +498,7 @@ async def test_raw_response_list_evaluation_test_cases(self, async_client: Async
         workspace = await response.parse()
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
@@ -512,7 +512,7 @@ async def test_streaming_response_list_evaluation_test_cases(self, async_client:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
index 2d63ff65..4154843c 100644
--- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -20,7 +20,7 @@ class TestAgents:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         agent = client.agents.evaluation_metrics.workspaces.agents.list(
@@ -28,7 +28,7 @@ def test_method_list(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         agent = client.agents.evaluation_metrics.workspaces.agents.list(
@@ -39,7 +39,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
@@ -51,7 +51,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         agent = response.parse()
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
@@ -65,7 +65,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -73,7 +73,7 @@ def test_path_params_list(self, client: Gradient) -> None:
                 workspace_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_move(self, client: Gradient) -> None:
         agent = client.agents.evaluation_metrics.workspaces.agents.move(
@@ -81,7 +81,7 @@ def test_method_move(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_move_with_all_params(self, client: Gradient) -> None:
         agent = client.agents.evaluation_metrics.workspaces.agents.move(
@@ -91,7 +91,7 @@ def test_method_move_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_move(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
@@ -103,7 +103,7 @@ def test_raw_response_move(self, client: Gradient) -> None:
         agent = response.parse()
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_move(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
@@ -117,7 +117,7 @@ def test_streaming_response_move(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_move(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
@@ -131,7 +131,7 @@ class TestAsyncAgents:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
@@ -139,7 +139,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
@@ -150,7 +150,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
@@ -162,7 +162,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         agent = await response.parse()
         assert_matches_type(AgentListResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
@@ -176,7 +176,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_list(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
@@ -184,7 +184,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
                 workspace_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_move(self, async_client: AsyncGradient) -> None:
         agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
@@ -192,7 +192,7 @@ async def test_method_move(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_move_with_all_params(self, async_client: AsyncGradient) -> None:
         agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
@@ -202,7 +202,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_move(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
@@ -214,7 +214,7 @@ async def test_raw_response_move(self, async_client: AsyncGradient) -> None:
         agent = await response.parse()
         assert_matches_type(AgentMoveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_move(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
@@ -228,7 +228,7 @@ async def test_streaming_response_move(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_move(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
index 4b80fc54..dbb19890 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -23,7 +23,7 @@ class TestAPIKeys:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.create(
@@ -31,7 +31,7 @@ def test_method_create(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.create(
@@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.api_keys.with_raw_response.create(
@@ -53,7 +53,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         api_key = response.parse()
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.api_keys.with_streaming_response.create(
@@ -67,7 +67,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_create(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
@@ -75,7 +75,7 @@ def test_path_params_create(self, client: Gradient) -> None:
                 path_agent_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.update(
@@ -84,7 +84,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.update(
@@ -96,7 +96,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.agents.api_keys.with_raw_response.update(
@@ -109,7 +109,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         api_key = response.parse()
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.agents.api_keys.with_streaming_response.update(
@@ -124,7 +124,7 @@ def test_streaming_response_update(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
@@ -139,7 +139,7 @@ def test_path_params_update(self, client: Gradient) -> None:
                 path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.list(
@@ -147,7 +147,7 @@ def test_method_list(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.list(
@@ -157,7 +157,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.api_keys.with_raw_response.list(
@@ -169,7 +169,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         api_key = response.parse()
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.api_keys.with_streaming_response.list(
@@ -183,7 +183,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
@@ -191,7 +191,7 @@ def test_path_params_list(self, client: Gradient) -> None:
                 agent_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.delete(
@@ -200,7 +200,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.api_keys.with_raw_response.delete(
@@ -213,7 +213,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         api_key = response.parse()
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.api_keys.with_streaming_response.delete(
@@ -228,7 +228,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
@@ -243,7 +243,7 @@ def test_path_params_delete(self, client: Gradient) -> None:
                 agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_regenerate(self, client: Gradient) -> None:
         api_key = client.agents.api_keys.regenerate(
@@ -252,7 +252,7 @@ def test_method_regenerate(self, client: Gradient) -> None:
         )
         assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_regenerate(self, client: Gradient) -> None:
         response = client.agents.api_keys.with_raw_response.regenerate(
@@ -265,7 +265,7 @@ def test_raw_response_regenerate(self, client: Gradient) -> None:
         api_key = response.parse()
         assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_regenerate(self, client: Gradient) -> None:
         with client.agents.api_keys.with_streaming_response.regenerate(
@@ -280,7 +280,7 @@ def test_streaming_response_regenerate(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_regenerate(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
@@ -301,7 +301,7 @@ class TestAsyncAPIKeys:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.create(
@@ -309,7 +309,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.create(
@@ -319,7 +319,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.api_keys.with_raw_response.create(
@@ -331,7 +331,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         api_key = await response.parse()
         assert_matches_type(APIKeyCreateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.api_keys.with_streaming_response.create(
@@ -345,7 +345,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_create(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
@@ -353,7 +353,7 @@ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
                 path_agent_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.update(
@@ -362,7 +362,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.update(
@@ -374,7 +374,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.api_keys.with_raw_response.update(
@@ -387,7 +387,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         api_key = await response.parse()
         assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.api_keys.with_streaming_response.update(
@@ -402,7 +402,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"):
@@ -417,7 +417,7 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
                 path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.list(
@@ -425,7 +425,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.list(
@@ -435,7 +435,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.api_keys.with_raw_response.list(
@@ -447,7 +447,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         api_key = await response.parse()
         assert_matches_type(APIKeyListResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.api_keys.with_streaming_response.list(
@@ -461,7 +461,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_list(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
@@ -469,7 +469,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
                 agent_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.delete(
@@ -478,7 +478,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.api_keys.with_raw_response.delete(
@@ -491,7 +491,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         api_key = await response.parse()
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.api_keys.with_streaming_response.delete(
@@ -506,7 +506,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
@@ -521,7 +521,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
                 agent_uuid='"123e4567-e89b-12d3-a456-426614174000"',
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_regenerate(self, async_client: AsyncGradient) -> None:
         api_key = await async_client.agents.api_keys.regenerate(
@@ -530,7 +530,7 @@ async def test_method_regenerate(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_regenerate(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.api_keys.with_raw_response.regenerate(
@@ -543,7 +543,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradient) -> Non
         api_key = await response.parse()
         assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_regenerate(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.api_keys.with_streaming_response.regenerate(
@@ -558,7 +558,7 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradient)

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_regenerate(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"):
diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
index 3978ebdd..64dceb03 100644
--- a/tests/api_resources/agents/test_evaluation_datasets.py
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -20,13 +20,13 @@ class TestEvaluationDatasets:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         evaluation_dataset = client.agents.evaluation_datasets.create()
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         evaluation_dataset = client.agents.evaluation_datasets.create(
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.evaluation_datasets.with_raw_response.create()
@@ -49,7 +49,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         evaluation_dataset = response.parse()
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.evaluation_datasets.with_streaming_response.create() as response:
@@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_file_upload_presigned_urls(self, client: Gradient) -> None:
         evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls()
@@ -69,7 +69,7 @@ def test_method_create_file_upload_presigned_urls(self, client: Gradient) -> Non
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_file_upload_presigned_urls_with_all_params(self, client: Gradient) -> None:
         evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls(
@@ -84,7 +84,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client:
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_file_upload_presigned_urls(self, client: Gradient) -> None:
         response = client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
@@ -96,7 +96,7 @@ def test_raw_response_create_file_upload_presigned_urls(self, client: Gradient)
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_file_upload_presigned_urls(self, client: Gradient) -> None:
         with client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response:
@@ -116,13 +116,13 @@ class TestAsyncEvaluationDatasets:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         evaluation_dataset = await async_client.agents.evaluation_datasets.create()
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_dataset = await async_client.agents.evaluation_datasets.create(
@@ -135,7 +135,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_datasets.with_raw_response.create()
@@ -145,7 +145,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         evaluation_dataset = await response.parse()
         assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_datasets.with_streaming_response.create() as response:
@@ -157,7 +157,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
         evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls()
@@ -165,7 +165,7 @@ async def test_method_create_file_upload_presigned_urls(self, async_client: Asyn
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_file_upload_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls(
@@ -180,7 +180,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params(self, as
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
@@ -192,7 +192,7 @@ async def test_raw_response_create_file_upload_presigned_urls(self, async_client
             EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
         )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradient) -> None:
         async with (
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
index 612f4228..088353bb 100644
--- a/tests/api_resources/agents/test_evaluation_metrics.py
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -20,13 +20,13 @@ class TestEvaluationMetrics:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         evaluation_metric = client.agents.evaluation_metrics.list()
         assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.with_raw_response.list()
@@ -36,7 +36,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         evaluation_metric = response.parse()
         assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.with_streaming_response.list() as response:
@@ -48,13 +48,13 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_regions(self, client: Gradient) -> None:
         evaluation_metric = client.agents.evaluation_metrics.list_regions()
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_regions_with_all_params(self, client: Gradient) -> None:
         evaluation_metric = client.agents.evaluation_metrics.list_regions(
@@ -63,7 +63,7 @@ def test_method_list_regions_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list_regions(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.with_raw_response.list_regions()
@@ -73,7 +73,7 @@ def test_raw_response_list_regions(self, client: Gradient) -> None:
         evaluation_metric = response.parse()
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list_regions(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response:
@@ -91,13 +91,13 @@ class TestAsyncEvaluationMetrics:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         evaluation_metric = await async_client.agents.evaluation_metrics.list()
         assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.with_raw_response.list()
@@ -107,7 +107,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         evaluation_metric = await response.parse()
         assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.with_streaming_response.list() as response:
@@ -119,13 +119,13 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_regions(self, async_client: AsyncGradient) -> None:
         evaluation_metric = await async_client.agents.evaluation_metrics.list_regions()
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_regions_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_metric = await async_client.agents.evaluation_metrics.list_regions(
@@ -134,7 +134,7 @@ async def test_method_list_regions_with_all_params(self, async_client: AsyncGrad
         )
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list_regions(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions()
@@ -144,7 +144,7 @@ async def test_raw_response_list_regions(self, async_client: AsyncGradient) -> N
         evaluation_metric = await response.parse()
         assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list_regions(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response:
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index be842cbc..8d7e1826 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -22,13 +22,13 @@ class TestEvaluationRuns:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.create()
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.create(
@@ -38,7 +38,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.create()
@@ -48,7 +48,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         evaluation_run = response.parse()
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.create() as response:
@@ -60,7 +60,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.retrieve(
@@ -68,7 +68,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.retrieve(
@@ -80,7 +80,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         evaluation_run = response.parse()
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.retrieve(
@@ -94,7 +94,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
@@ -102,7 +102,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_results(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.list_results(
@@ -110,7 +110,7 @@ def test_method_list_results(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_results_with_all_params(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.list_results(
@@ -120,7 +120,7 @@ def test_method_list_results_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list_results(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.list_results(
@@ -132,7 +132,7 @@ def test_raw_response_list_results(self, client: Gradient) -> None:
         evaluation_run = response.parse()
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list_results(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.list_results(
@@ -146,7 +146,7 @@ def test_streaming_response_list_results(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_list_results(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
@@ -154,7 +154,7 @@ def test_path_params_list_results(self, client: Gradient) -> None:
                 evaluation_run_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve_results(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.retrieve_results(
@@ -163,7 +163,7 @@ def test_method_retrieve_results(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve_results(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.retrieve_results(
@@ -176,7 +176,7 @@ def test_raw_response_retrieve_results(self, client: Gradient) -> None:
         evaluation_run = response.parse()
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve_results(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.retrieve_results(
@@ -191,7 +191,7 @@ def test_streaming_response_retrieve_results(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve_results(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
@@ -206,13 +206,13 @@ class TestAsyncEvaluationRuns:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.create()
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.create(
@@ -222,7 +222,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.create()
@@ -232,7 +232,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         evaluation_run = await response.parse()
         assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.create() as response:
@@ -244,7 +244,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve(
@@ -252,7 +252,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve(
@@ -264,7 +264,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         evaluation_run = await response.parse()
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve(
@@ -278,7 +278,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
@@ -286,7 +286,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_results(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.list_results(
@@ -294,7 +294,7 @@ async def test_method_list_results(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_results_with_all_params(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.list_results(
@@ -304,7 +304,7 @@ async def test_method_list_results_with_all_params(self, async_client: AsyncGrad
         )
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list_results(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.list_results(
@@ -316,7 +316,7 @@ async def test_raw_response_list_results(self, async_client: AsyncGradient) -> N
         evaluation_run = await response.parse()
         assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list_results(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.list_results(
@@ -330,7 +330,7 @@ async def test_streaming_response_list_results(self, async_client: AsyncGradient

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_list_results(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
@@ -338,7 +338,7 @@ async def test_path_params_list_results(self, async_client: AsyncGradient) -> No
                 evaluation_run_uuid="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve_results(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve_results(
@@ -347,7 +347,7 @@ async def test_method_retrieve_results(self, async_client: AsyncGradient) -> Non
         )
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve_results(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
@@ -360,7 +360,7 @@ async def test_raw_response_retrieve_results(self, async_client: AsyncGradient)
         evaluation_run = await response.parse()
         assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve_results(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results(
@@ -375,7 +375,7 @@ async def test_streaming_response_retrieve_results(self, async_client: AsyncGrad

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve_results(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index b1d92580..7cd0a07e 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -23,13 +23,13 @@ class TestEvaluationTestCases:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.create()
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.create(
@@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.evaluation_test_cases.with_raw_response.create()
@@ -57,7 +57,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         evaluation_test_case = response.parse()
         assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.evaluation_test_cases.with_streaming_response.create() as response:
@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
@@ -77,7 +77,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
         evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
@@ -86,7 +86,7 @@ def test_method_retrieve_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])

-    @pytest.mark.skip()
+
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_test_cases.with_raw_response.retrieve( @@ -98,7 +98,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: evaluation_test_case = response.parse() assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_test_cases.with_streaming_response.retrieve( @@ -112,7 +112,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): @@ -120,7 +120,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: test_case_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( @@ -128,7 +128,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( @@ -147,7 +147,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.evaluation_test_cases.with_raw_response.update( @@ -159,7 +159,7 @@ def test_raw_response_update(self, client: Gradient) -> None: evaluation_test_case = response.parse() assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.evaluation_test_cases.with_streaming_response.update( @@ -173,7 +173,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): @@ -181,13 +181,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_test_case_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_raw_response_list(self, client: Gradient) -> None: response = client.agents.evaluation_test_cases.with_raw_response.list() @@ -197,7 +197,7 @@ def test_raw_response_list(self, client: Gradient) -> None: evaluation_test_case = response.parse() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.evaluation_test_cases.with_streaming_response.list() as response: @@ -209,7 +209,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_evaluation_runs(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( @@ -217,7 +217,7 @@ def test_method_list_evaluation_runs(self, client: Gradient) -> None: ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_evaluation_runs_with_all_params(self, client: Gradient) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( @@ -226,7 +226,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: Gradient) -> ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_evaluation_runs(self, client: Gradient) -> None: response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( @@ -238,7 +238,7 @@ def test_raw_response_list_evaluation_runs(self, client: Gradient) -> None: evaluation_test_case = response.parse() assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_evaluation_runs(self, client: Gradient) -> None: with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( @@ -252,7 +252,7 @@ def test_streaming_response_list_evaluation_runs(self, client: Gradient) -> None assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_evaluation_runs(self, client: Gradient) -> None: with pytest.raises( @@ -268,13 +268,13 @@ class TestAsyncEvaluationTestCases: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create() assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create( @@ -292,7 +292,7 @@ async def test_method_create_with_all_params(self, async_client: 
AsyncGradient) ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.create() @@ -302,7 +302,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: evaluation_test_case = await response.parse() assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.create() as response: @@ -314,7 +314,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( @@ -322,7 +322,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( @@ -331,7 +331,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( @@ -343,7 +343,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: evaluation_test_case = await response.parse() assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( @@ -357,7 +357,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): @@ -365,7 +365,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: test_case_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( @@ -373,7 +373,7 @@ async def test_method_update(self, async_client: 
AsyncGradient) -> None: ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( @@ -392,7 +392,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.update( @@ -404,7 +404,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: evaluation_test_case = await response.parse() assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.update( @@ -418,7 +418,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): @@ -426,13 +426,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_test_case_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.list() @@ -442,7 +442,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: evaluation_test_case = await response.parse() assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.list() as response: @@ -454,7 +454,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_evaluation_runs(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( @@ -462,7 +462,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradient) -> ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, 
evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradient) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( @@ -471,7 +471,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( @@ -483,7 +483,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie evaluation_test_case = await response.parse() assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( @@ -497,7 +497,7 @@ async def test_streaming_response_list_evaluation_runs(self, async_client: Async assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradient) -> None: with pytest.raises( diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 0ba54432..64d55331 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -21,7 +21,7 @@ class TestFunctions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: function = client.agents.functions.create( @@ -29,7 +29,7 @@ def test_method_create(self, client: Gradient) -> None: ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: function = client.agents.functions.create( @@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.agents.functions.with_raw_response.create( @@ -56,7 +56,7 @@ def test_raw_response_create(self, client: Gradient) -> None: function = response.parse() assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.agents.functions.with_streaming_response.create( @@ -70,7 +70,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - 
@pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_create(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): @@ -78,7 +78,7 @@ def test_path_params_create(self, client: Gradient) -> None: path_agent_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: function = client.agents.functions.update( @@ -87,7 +87,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: function = client.agents.functions.update( @@ -104,7 +104,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.functions.with_raw_response.update( @@ -117,7 +117,7 @@ def test_raw_response_update(self, client: Gradient) -> None: function = response.parse() assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.functions.with_streaming_response.update( @@ -132,7 +132,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): @@ -147,7 +147,7 @@ def test_path_params_update(self, client: Gradient) -> None: path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: function = client.agents.functions.delete( @@ -156,7 +156,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.functions.with_raw_response.delete( @@ -169,7 +169,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: function = response.parse() assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.functions.with_streaming_response.delete( @@ -184,7 +184,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): 
@@ -205,7 +205,7 @@ class TestAsyncFunctions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: function = await async_client.agents.functions.create( @@ -213,7 +213,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: function = await async_client.agents.functions.create( @@ -228,7 +228,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.functions.with_raw_response.create( @@ -240,7 +240,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: function = await response.parse() assert_matches_type(FunctionCreateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.functions.with_streaming_response.create( @@ -254,7 +254,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_create(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): @@ -262,7 +262,7 @@ async def test_path_params_create(self, async_client: AsyncGradient) -> None: path_agent_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: function = await async_client.agents.functions.update( @@ -271,7 +271,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: function = await async_client.agents.functions.update( @@ -288,7 +288,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.functions.with_raw_response.update( @@ -301,7 +301,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: function = await response.parse() assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.functions.with_streaming_response.update( @@ -316,7 +316,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): @@ -331,7 +331,7 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: function = await async_client.agents.functions.delete( @@ -340,7 +340,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.functions.with_raw_response.delete( @@ -353,7 +353,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: function = await response.parse() assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.functions.with_streaming_response.delete( @@ -368,7 +368,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index dd35e5f4..60dae7d0 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -17,7 +17,7 @@ class TestKnowledgeBases: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_attach(self, client: Gradient) -> None: knowledge_base = client.agents.knowledge_bases.attach( @@ -25,7 +25,7 @@ def test_method_attach(self, client: Gradient) -> None: ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_attach(self, client: Gradient) -> None: response = client.agents.knowledge_bases.with_raw_response.attach( @@ -37,7 +37,7 @@ def test_raw_response_attach(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_attach(self, client: 
Gradient) -> None: with client.agents.knowledge_bases.with_streaming_response.attach( @@ -51,7 +51,7 @@ def test_streaming_response_attach(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_attach(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): @@ -59,7 +59,7 @@ def test_path_params_attach(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_attach_single(self, client: Gradient) -> None: knowledge_base = client.agents.knowledge_bases.attach_single( @@ -68,7 +68,7 @@ def test_method_attach_single(self, client: Gradient) -> None: ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_attach_single(self, client: Gradient) -> None: response = client.agents.knowledge_bases.with_raw_response.attach_single( @@ -81,7 +81,7 @@ def test_raw_response_attach_single(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_attach_single(self, client: Gradient) -> None: with client.agents.knowledge_bases.with_streaming_response.attach_single( @@ -96,7 +96,7 @@ def test_streaming_response_attach_single(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_attach_single(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): @@ -111,7 +111,7 @@ def test_path_params_attach_single(self, client: Gradient) -> None: agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_detach(self, client: Gradient) -> None: knowledge_base = client.agents.knowledge_bases.detach( @@ -120,7 +120,7 @@ def test_method_detach(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_detach(self, client: Gradient) -> None: response = client.agents.knowledge_bases.with_raw_response.detach( @@ -133,7 +133,7 @@ def test_raw_response_detach(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_detach(self, client: Gradient) -> None: with client.agents.knowledge_bases.with_streaming_response.detach( @@ -148,7 +148,7 @@ def test_streaming_response_detach(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_detach(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): @@ 
-169,7 +169,7 @@ class TestAsyncKnowledgeBases: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_attach(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.agents.knowledge_bases.attach( @@ -177,7 +177,7 @@ async def test_method_attach(self, async_client: AsyncGradient) -> None: ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_attach(self, async_client: AsyncGradient) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.attach( @@ -189,7 +189,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_attach(self, async_client: AsyncGradient) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.attach( @@ -203,7 +203,7 @@ async def test_streaming_response_attach(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_attach(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): @@ -211,7 +211,7 @@ async def test_path_params_attach(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_attach_single(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.agents.knowledge_bases.attach_single( @@ -220,7 +220,7 @@ async def test_method_attach_single(self, async_client: AsyncGradient) -> None: ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradient) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( @@ -233,7 +233,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradient) -> knowledge_base = await response.parse() assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradient) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( @@ -248,7 +248,7 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_attach_single(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): @@ -263,7 +263,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradient) -> N 
agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_detach(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.agents.knowledge_bases.detach( @@ -272,7 +272,7 @@ async def test_method_detach(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_detach(self, async_client: AsyncGradient) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.detach( @@ -285,7 +285,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradient) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.detach( @@ -300,7 +300,7 @@ async def test_streaming_response_detach(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_detach(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index 294fa853..37bc4eac 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -22,7 +22,7 @@ class TestRoutes: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: route = client.agents.routes.update( @@ -31,7 +31,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: route = client.agents.routes.update( @@ -45,7 +45,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.routes.with_raw_response.update( @@ -58,7 +58,7 @@ def test_raw_response_update(self, client: Gradient) -> None: route = response.parse() assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.routes.with_streaming_response.update( @@ -73,7 +73,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises( 
@@ -90,7 +90,7 @@ def test_path_params_update(self, client: Gradient) -> None: path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: route = client.agents.routes.delete( @@ -99,7 +99,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.routes.with_raw_response.delete( @@ -112,7 +112,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: route = response.parse() assert_matches_type(RouteDeleteResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.routes.with_streaming_response.delete( @@ -127,7 +127,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): @@ -142,7 +142,7 @@ def test_path_params_delete(self, client: Gradient) -> None: parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_add(self, client: Gradient) -> None: route = client.agents.routes.add( @@ -151,7 +151,7 @@ def test_method_add(self, client: Gradient) -> None: ) assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_add_with_all_params(self, client: Gradient) -> None: route = client.agents.routes.add( @@ -164,7 +164,7 @@ def test_method_add_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_add(self, client: Gradient) -> None: response = client.agents.routes.with_raw_response.add( @@ -177,7 +177,7 @@ def test_raw_response_add(self, client: Gradient) -> None: route = response.parse() assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_add(self, client: Gradient) -> None: with client.agents.routes.with_streaming_response.add( @@ -192,7 +192,7 @@ def test_streaming_response_add(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_add(self, client: Gradient) -> None: with pytest.raises( @@ -209,7 +209,7 @@ def test_path_params_add(self, client: Gradient) -> None: path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_view(self, client: Gradient) -> None: route = client.agents.routes.view( @@ -217,7 +217,7 @@ def test_method_view(self, client: Gradient) -> None: ) 
assert_matches_type(RouteViewResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_view(self, client: Gradient) -> None: response = client.agents.routes.with_raw_response.view( @@ -229,7 +229,7 @@ def test_raw_response_view(self, client: Gradient) -> None: route = response.parse() assert_matches_type(RouteViewResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_view(self, client: Gradient) -> None: with client.agents.routes.with_streaming_response.view( @@ -243,7 +243,7 @@ def test_streaming_response_view(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_view(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -257,7 +257,7 @@ class TestAsyncRoutes: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.update( @@ -266,7 +266,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.update( @@ -280,7 +280,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.update( @@ -293,7 +293,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: route = await response.parse() assert_matches_type(RouteUpdateResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.update( @@ -308,7 +308,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises( @@ -325,7 +325,7 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.delete( @@ -334,7 +334,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.delete( @@ -347,7 +347,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: route = await response.parse() assert_matches_type(RouteDeleteResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.delete( @@ -362,7 +362,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): @@ -377,7 +377,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.add( @@ -386,7 +386,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None: ) assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.add( @@ -399,7 +399,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_add(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.add( @@ -412,7 +412,7 @@ async def test_raw_response_add(self, async_client: AsyncGradient) -> None: route = await response.parse() assert_matches_type(RouteAddResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_add(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.add( @@ -427,7 +427,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_add(self, async_client: AsyncGradient) -> None: with pytest.raises( @@ -444,7 +444,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None: path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_view(self, async_client: AsyncGradient) -> None: route = await async_client.agents.routes.view( @@ -452,7 +452,7 @@ async def test_method_view(self, async_client: AsyncGradient) -> None: ) assert_matches_type(RouteViewResponse, route, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_view(self, async_client: AsyncGradient) -> None: response = await async_client.agents.routes.with_raw_response.view( @@ -464,7 +464,7 @@ async def test_raw_response_view(self, async_client: AsyncGradient) -> None: route = await response.parse() assert_matches_type(RouteViewResponse, route, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_view(self, async_client: AsyncGradient) -> None: async with async_client.agents.routes.with_streaming_response.view( @@ -478,7 +478,7 @@ async def test_streaming_response_view(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_view(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 4b45edf7..d12e362e 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -17,7 +17,7 @@ class TestVersions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: version = client.agents.versions.update( @@ -25,7 +25,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: version = client.agents.versions.update( @@ -35,7 +35,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.versions.with_raw_response.update( @@ -47,7 +47,7 @@ def test_raw_response_update(self, client: Gradient) -> None: version = response.parse() assert_matches_type(VersionUpdateResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.versions.with_streaming_response.update( @@ -61,7 +61,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -69,7 +69,7 @@ def test_path_params_update(self, client: Gradient) -> None: path_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: version = client.agents.versions.list( @@ -77,7 +77,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(VersionListResponse, version, path=["response"]) - 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         version = client.agents.versions.list(
@@ -87,7 +87,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(VersionListResponse, version, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.agents.versions.with_raw_response.list(
@@ -99,7 +99,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         version = response.parse()
         assert_matches_type(VersionListResponse, version, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.agents.versions.with_streaming_response.list(
@@ -113,7 +113,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_list(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
@@ -127,7 +127,7 @@ class TestAsyncVersions:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         version = await async_client.agents.versions.update(
@@ -135,7 +135,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(VersionUpdateResponse, version, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         version = await async_client.agents.versions.update(
@@ -145,7 +145,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(VersionUpdateResponse, version, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.versions.with_raw_response.update(
@@ -157,7 +157,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         version = await response.parse()
         assert_matches_type(VersionUpdateResponse, version, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.versions.with_streaming_response.update(
@@ -171,7 +171,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
@@ -179,7 +179,7 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
                 path_uuid="",
             )

-    @pytest.mark.skip()
are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.list( @@ -187,7 +187,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(VersionListResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: version = await async_client.agents.versions.list( @@ -197,7 +197,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(VersionListResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.versions.with_raw_response.list( @@ -209,7 +209,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: version = await response.parse() assert_matches_type(VersionListResponse, version, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.versions.with_streaming_response.list( @@ -223,7 +223,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index a25fd3c4..fce393fd 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -17,7 +17,7 @@ class TestCompletions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: completion = client.chat.completions.create( @@ -31,7 +31,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: completion = client.chat.completions.create( @@ -71,7 +71,7 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non ) assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.chat.completions.with_raw_response.create( @@ -89,7 +89,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: completion = response.parse() assert_matches_type(CompletionCreateResponse, completion, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with 
         with client.chat.completions.with_streaming_response.create(
@@ -109,7 +109,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_overload_2(self, client: Gradient) -> None:
         completion_stream = client.chat.completions.create(
@@ -124,7 +124,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None:
         )
         completion_stream.response.close()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
         completion_stream = client.chat.completions.create(
@@ -164,7 +164,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non
         )
         completion_stream.response.close()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.chat.completions.with_raw_response.create(
@@ -182,7 +182,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         stream = response.parse()
         stream.close()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.chat.completions.with_streaming_response.create(
@@ -209,7 +209,7 @@ class TestAsyncCompletions:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         completion = await async_client.chat.completions.create(
@@ -223,7 +223,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No
         )
         assert_matches_type(CompletionCreateResponse, completion, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         completion = await async_client.chat.completions.create(
@@ -263,7 +263,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         )
         assert_matches_type(CompletionCreateResponse, completion, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.chat.completions.with_raw_response.create(
@@ -281,7 +281,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient)
         completion = await response.parse()
         assert_matches_type(CompletionCreateResponse, completion, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.chat.completions.with_streaming_response.create(
@@ -301,7 +301,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         completion_stream = await async_client.chat.completions.create(
@@ -316,7 +316,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No
         )
         await completion_stream.response.aclose()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         completion_stream = await async_client.chat.completions.create(
@@ -356,7 +356,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
         )
         await completion_stream.response.aclose()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.chat.completions.with_raw_response.create(
@@ -374,7 +374,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient)
         stream = await response.parse()
         await stream.close()

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.chat.completions.with_streaming_response.create(
diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py
index 024d8b0a..b1d21f62 100644
--- a/tests/api_resources/databases/schema_registry/test_config.py
+++ b/tests/api_resources/databases/schema_registry/test_config.py
@@ -22,7 +22,7 @@ class TestConfig:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         config = client.databases.schema_registry.config.retrieve(
@@ -30,7 +30,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.databases.schema_registry.config.with_raw_response.retrieve(
@@ -42,7 +42,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         config = response.parse()
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.databases.schema_registry.config.with_streaming_response.retrieve(
@@ -56,7 +56,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -64,7 +64,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         config = client.databases.schema_registry.config.update(
@@ -73,7 +73,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(ConfigUpdateResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.databases.schema_registry.config.with_raw_response.update(
@@ -86,7 +86,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         config = response.parse()
         assert_matches_type(ConfigUpdateResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.databases.schema_registry.config.with_streaming_response.update(
@@ -101,7 +101,7 @@ def test_streaming_response_update(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -110,7 +110,7 @@ def test_path_params_update(self, client: Gradient) -> None:
                 compatibility_level="BACKWARD",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve_subject(self, client: Gradient) -> None:
         config = client.databases.schema_registry.config.retrieve_subject(
@@ -119,7 +119,7 @@ def test_method_retrieve_subject(self, client: Gradient) -> None:
         )
         assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve_subject(self, client: Gradient) -> None:
         response = client.databases.schema_registry.config.with_raw_response.retrieve_subject(
@@ -132,7 +132,7 @@ def test_raw_response_retrieve_subject(self, client: Gradient) -> None:
         config = response.parse()
         assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve_subject(self, client: Gradient) -> None:
         with client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
@@ -147,7 +147,7 @@ def test_streaming_response_retrieve_subject(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve_subject(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -162,7 +162,7 @@ def test_path_params_retrieve_subject(self, client: Gradient) -> None:
                 database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_subject(self, client: Gradient) -> None:
         config = client.databases.schema_registry.config.update_subject(
@@ -172,7 +172,7 @@ def test_method_update_subject(self, client: Gradient) -> None:
         )
         assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update_subject(self, client: Gradient) -> None:
         response = client.databases.schema_registry.config.with_raw_response.update_subject(
@@ -186,7 +186,7 @@ def test_raw_response_update_subject(self, client: Gradient) -> None:
         config = response.parse()
         assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update_subject(self, client: Gradient) -> None:
         with client.databases.schema_registry.config.with_streaming_response.update_subject(
@@ -202,7 +202,7 @@ def test_streaming_response_update_subject(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update_subject(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -225,7 +225,7 @@ class TestAsyncConfig:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         config = await async_client.databases.schema_registry.config.retrieve(
@@ -233,7 +233,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.databases.schema_registry.config.with_raw_response.retrieve(
@@ -245,7 +245,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         config = await response.parse()
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.databases.schema_registry.config.with_streaming_response.retrieve(
@@ -259,7 +259,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -267,7 +267,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         config = await async_client.databases.schema_registry.config.update(
@@ -276,7 +276,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(ConfigUpdateResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.databases.schema_registry.config.with_raw_response.update(
@@ -289,7 +289,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         config = await response.parse()
         assert_matches_type(ConfigUpdateResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.databases.schema_registry.config.with_streaming_response.update(
@@ -304,7 +304,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -313,7 +313,7 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
                 compatibility_level="BACKWARD",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve_subject(self, async_client: AsyncGradient) -> None:
         config = await async_client.databases.schema_registry.config.retrieve_subject(
@@ -322,7 +322,7 @@ async def test_method_retrieve_subject(self, async_client: AsyncGradient) -> Non
         )
         assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve_subject(self, async_client: AsyncGradient) -> None:
         response = await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
@@ -335,7 +335,7 @@ async def test_raw_response_retrieve_subject(self, async_client: AsyncGradient)
         config = await response.parse()
         assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradient) -> None:
         async with async_client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
@@ -350,7 +350,7 @@ async def test_streaming_response_retrieve_subject(self, async_client: AsyncGrad

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve_subject(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
@@ -365,7 +365,7 @@ async def test_path_params_retrieve_subject(self, async_client: AsyncGradient) -
                 database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_subject(self, async_client: AsyncGradient) -> None:
         config = await async_client.databases.schema_registry.config.update_subject(
@@ -375,7 +375,7 @@ async def test_method_update_subject(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update_subject(self, async_client: AsyncGradient) -> None:
         response = await async_client.databases.schema_registry.config.with_raw_response.update_subject(
@@ -389,7 +389,7 @@ async def test_raw_response_update_subject(self, async_client: AsyncGradient) ->
         config = await response.parse()
         assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update_subject(self, async_client: AsyncGradient) -> None:
         async with async_client.databases.schema_registry.config.with_streaming_response.update_subject(
@@ -405,7 +405,7 @@ async def test_streaming_response_update_subject(self, async_client: AsyncGradie

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update_subject(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py
index 5a63c275..93817d1e 100644
--- a/tests/api_resources/gpu_droplets/account/test_keys.py
+++ b/tests/api_resources/gpu_droplets/account/test_keys.py
@@ -22,7 +22,7 @@ class TestKeys:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.create(
@@ -31,7 +31,7 @@ def test_method_create(self, client: Gradient) -> None:
         )
         assert_matches_type(KeyCreateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.gpu_droplets.account.keys.with_raw_response.create(
@@ -44,7 +44,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         key = response.parse()
         assert_matches_type(KeyCreateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.gpu_droplets.account.keys.with_streaming_response.create(
@@ -59,7 +59,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.retrieve(
@@ -67,7 +67,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.account.keys.with_raw_response.retrieve(
@@ -79,7 +79,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         key = response.parse()
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.account.keys.with_streaming_response.retrieve(
@@ -93,7 +93,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.update(
@@ -101,7 +101,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(KeyUpdateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.update(
@@ -110,7 +110,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(KeyUpdateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.gpu_droplets.account.keys.with_raw_response.update(
@@ -122,7 +122,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         key = response.parse()
         assert_matches_type(KeyUpdateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.gpu_droplets.account.keys.with_streaming_response.update(
@@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.list()
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.list(
@@ -151,7 +151,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.account.keys.with_raw_response.list()
@@ -161,7 +161,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         key = response.parse()
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.account.keys.with_streaming_response.list() as response:
@@ -173,7 +173,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         key = client.gpu_droplets.account.keys.delete(
@@ -181,7 +181,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert key is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.account.keys.with_raw_response.delete(
@@ -193,7 +193,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         key = response.parse()
         assert key is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.account.keys.with_streaming_response.delete(
@@ -213,7 +213,7 @@ class TestAsyncKeys:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.create( @@ -222,7 +222,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.create( @@ -235,7 +235,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyCreateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.create( @@ -250,7 +250,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.retrieve( @@ -258,7 +258,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.retrieve( @@ -270,7 +270,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: key = await response.parse() assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.account.keys.with_streaming_response.retrieve( @@ -284,7 +284,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.update( @@ -292,7 +292,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: key = await async_client.gpu_droplets.account.keys.update( @@ -301,7 +301,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KeyUpdateResponse, key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.account.keys.with_raw_response.update( @@ -313,7 +313,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: key = 
         key = await response.parse()
         assert_matches_type(KeyUpdateResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.account.keys.with_streaming_response.update(
@@ -327,13 +327,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         key = await async_client.gpu_droplets.account.keys.list()
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         key = await async_client.gpu_droplets.account.keys.list(
@@ -342,7 +342,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.account.keys.with_raw_response.list()
@@ -352,7 +352,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         key = await response.parse()
         assert_matches_type(KeyListResponse, key, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.account.keys.with_streaming_response.list() as response:
@@ -364,7 +364,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         key = await async_client.gpu_droplets.account.keys.delete(
@@ -372,7 +372,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert key is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.account.keys.with_raw_response.delete(
@@ -384,7 +384,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         key = await response.parse()
         assert key is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.account.keys.with_streaming_response.delete(
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
index 8f39a064..693e315d 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py
@@ -15,7 +15,7 @@ class TestDroplets:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_add(self, client: Gradient) -> None:
         droplet = client.gpu_droplets.firewalls.droplets.add(
@@ -24,7 +24,7 @@ def test_method_add(self, client: Gradient) -> None:
         )
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.droplets.with_raw_response.add(
@@ -37,7 +37,7 @@ def test_raw_response_add(self, client: Gradient) -> None:
         droplet = response.parse()
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.droplets.with_streaming_response.add(
@@ -52,7 +52,7 @@ def test_streaming_response_add(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -61,7 +61,7 @@ def test_path_params_add(self, client: Gradient) -> None:
                 droplet_ids=[49696269],
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_remove(self, client: Gradient) -> None:
         droplet = client.gpu_droplets.firewalls.droplets.remove(
@@ -70,7 +70,7 @@ def test_method_remove(self, client: Gradient) -> None:
         )
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
@@ -83,7 +83,7 @@ def test_raw_response_remove(self, client: Gradient) -> None:
         droplet = response.parse()
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.droplets.with_streaming_response.remove(
@@ -98,7 +98,7 @@ def test_streaming_response_remove(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -113,7 +113,7 @@ class TestAsyncDroplets:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_add(self, async_client: AsyncGradient) -> None:
         droplet = await async_client.gpu_droplets.firewalls.droplets.add(
@@ -122,7 +122,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None:
         )
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add(
@@ -135,7 +135,7 @@ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         droplet = await response.parse()
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.add(
@@ -150,7 +150,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -159,7 +159,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
                 droplet_ids=[49696269],
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_remove(self, async_client: AsyncGradient) -> None:
         droplet = await async_client.gpu_droplets.firewalls.droplets.remove(
@@ -168,7 +168,7 @@ async def test_method_remove(self, async_client: AsyncGradient) -> None:
         )
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove(
@@ -181,7 +181,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         droplet = await response.parse()
         assert droplet is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.remove(
@@ -196,7 +196,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
index 2bd74228..27694390 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py
@@ -15,7 +15,7 @@ class TestRules:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_add(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.add(
@@ -23,7 +23,7 @@ def test_method_add(self, client: Gradient) -> None:
         )
         assert rule is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_add_with_all_params(self, client: Gradient) -> None:
         rule = client.gpu_droplets.firewalls.rules.add(
@@ -57,7 +57,7 @@ def test_method_add_with_all_params(self, client: Gradient) -> None:
         )
         assert rule is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.rules.with_raw_response.add(
@@ -69,7 +69,7 @@ def test_raw_response_add(self, client: Gradient) -> None:
         rule = response.parse()
         assert rule is None

-    @pytest.mark.skip()
tests are disabled") @parametrize def test_streaming_response_add(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.rules.with_streaming_response.add( @@ -83,7 +83,7 @@ def test_streaming_response_add(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_add(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): @@ -91,7 +91,7 @@ def test_path_params_add(self, client: Gradient) -> None: firewall_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_remove(self, client: Gradient) -> None: rule = client.gpu_droplets.firewalls.rules.remove( @@ -99,7 +99,7 @@ def test_method_remove(self, client: Gradient) -> None: ) assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_remove_with_all_params(self, client: Gradient) -> None: rule = client.gpu_droplets.firewalls.rules.remove( @@ -133,7 +133,7 @@ def test_method_remove_with_all_params(self, client: Gradient) -> None: ) assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_remove(self, client: Gradient) -> None: response = client.gpu_droplets.firewalls.rules.with_raw_response.remove( @@ -145,7 +145,7 @@ def test_raw_response_remove(self, client: Gradient) -> None: rule = response.parse() assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_remove(self, client: Gradient) -> None: with client.gpu_droplets.firewalls.rules.with_streaming_response.remove( @@ -159,7 +159,7 @@ def test_streaming_response_remove(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_remove(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): @@ -173,7 +173,7 @@ class TestAsyncRules: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add(self, async_client: AsyncGradient) -> None: rule = await async_client.gpu_droplets.firewalls.rules.add( @@ -181,7 +181,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None: ) assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> None: rule = await async_client.gpu_droplets.firewalls.rules.add( @@ -215,7 +215,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradient) -> ) assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_add(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.add( @@ -227,7 +227,7 @@ async def test_raw_response_add(self, async_client: AsyncGradient) -> None: rule = await response.parse() assert rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
     @parametrize
     async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.add(
@@ -241,7 +241,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -249,7 +249,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
                 firewall_id="",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_remove(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.remove(
@@ -257,7 +257,7 @@ async def test_method_remove(self, async_client: AsyncGradient) -> None:
         )
         assert rule is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_remove_with_all_params(self, async_client: AsyncGradient) -> None:
         rule = await async_client.gpu_droplets.firewalls.rules.remove(
@@ -291,7 +291,7 @@ async def test_method_remove_with_all_params(self, async_client: AsyncGradient)
         )
         assert rule is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove(
@@ -303,7 +303,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None:
         rule = await response.parse()
         assert rule is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.remove(
@@ -317,7 +317,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> N

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_remove(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
index cbd86f65..50c7563b 100644
--- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py
+++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py
@@ -15,7 +15,7 @@ class TestTags:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_add(self, client: Gradient) -> None:
         tag = client.gpu_droplets.firewalls.tags.add(
@@ -24,7 +24,7 @@ def test_method_add(self, client: Gradient) -> None:
         )
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_add(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.tags.with_raw_response.add(
@@ -37,7 +37,7 @@ def test_raw_response_add(self, client: Gradient) -> None:
         tag = response.parse()
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_add(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.tags.with_streaming_response.add(
@@ -52,7 +52,7 @@ def test_streaming_response_add(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_add(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -61,7 +61,7 @@ def test_path_params_add(self, client: Gradient) -> None:
                 tags=["frontend"],
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_remove(self, client: Gradient) -> None:
         tag = client.gpu_droplets.firewalls.tags.remove(
@@ -70,7 +70,7 @@ def test_method_remove(self, client: Gradient) -> None:
         )
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_remove(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.tags.with_raw_response.remove(
@@ -83,7 +83,7 @@ def test_raw_response_remove(self, client: Gradient) -> None:
         tag = response.parse()
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_remove(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.tags.with_streaming_response.remove(
@@ -98,7 +98,7 @@ def test_streaming_response_remove(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_remove(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -113,7 +113,7 @@ class TestAsyncTags:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_add(self, async_client: AsyncGradient) -> None:
         tag = await async_client.gpu_droplets.firewalls.tags.add(
@@ -122,7 +122,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None:
         )
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.add(
@@ -135,7 +135,7 @@ async def test_raw_response_add(self, async_client: AsyncGradient) -> None:
         tag = await response.parse()
         assert tag is None

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_add(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.add(
@@ -150,7 +150,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_add(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
None: tags=["frontend"], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_remove(self, async_client: AsyncGradient) -> None: tag = await async_client.gpu_droplets.firewalls.tags.remove( @@ -168,7 +168,7 @@ async def test_method_remove(self, async_client: AsyncGradient) -> None: ) assert tag is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove( @@ -181,7 +181,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: tag = await response.parse() assert tag is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.remove( @@ -196,7 +196,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_remove(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py index 9417a880..0d678103 100644 --- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py +++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py @@ -21,7 +21,7 @@ class TestActions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.floating_ips.actions.create( @@ -30,7 +30,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( @@ -43,7 +43,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( @@ -58,7 +58,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_create_overload_1(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -67,7 +67,7 @@ def test_path_params_create_overload_1(self, client: Gradient) -> None: type="assign", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_method_create_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.floating_ips.actions.create( @@ -77,7 +77,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None: ) assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( @@ -91,7 +91,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( @@ -107,7 +107,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_create_overload_2(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -117,7 +117,7 @@ def test_path_params_create_overload_2(self, client: Gradient) -> None: type="assign", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: action = client.gpu_droplets.floating_ips.actions.retrieve( @@ -126,7 +126,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( @@ -139,7 +139,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( @@ -154,7 +154,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -163,7 +163,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: floating_ip="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: action = client.gpu_droplets.floating_ips.actions.list( @@ -171,7 +171,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.floating_ips.actions.with_raw_response.list( @@ -183,7 
+183,7 @@ def test_raw_response_list(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.floating_ips.actions.with_streaming_response.list( @@ -197,7 +197,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -211,7 +211,7 @@ class TestAsyncActions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.floating_ips.actions.create( @@ -220,7 +220,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( @@ -233,7 +233,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) action = await response.parse() assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( @@ -248,7 +248,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_create_overload_1(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -257,7 +257,7 @@ async def test_path_params_create_overload_1(self, async_client: AsyncGradient) type="assign", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.floating_ips.actions.create( @@ -267,7 +267,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( @@ -281,7 +281,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) action = await response.parse() assert_matches_type(ActionCreateResponse, action, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( @@ -297,7 +297,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_create_overload_2(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -307,7 +307,7 @@ async def test_path_params_create_overload_2(self, async_client: AsyncGradient) type="assign", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.floating_ips.actions.retrieve( @@ -316,7 +316,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( @@ -329,7 +329,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( @@ -344,7 +344,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): @@ -353,7 +353,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: floating_ip="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.floating_ips.actions.list( @@ -361,7 +361,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list( @@ -373,7 +373,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with 
async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list( @@ -387,7 +387,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py index f59e3986..ad8b9585 100644 --- a/tests/api_resources/gpu_droplets/images/test_actions.py +++ b/tests/api_resources/gpu_droplets/images/test_actions.py @@ -18,7 +18,7 @@ class TestActions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.images.actions.create( @@ -27,7 +27,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.images.actions.with_raw_response.create( @@ -40,7 +40,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: action = response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.images.actions.with_streaming_response.create( @@ -55,7 +55,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.images.actions.create( @@ -65,7 +65,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None: ) assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.images.actions.with_raw_response.create( @@ -79,7 +79,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: action = response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.images.actions.with_streaming_response.create( @@ -95,7 +95,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: action = client.gpu_droplets.images.actions.retrieve( @@ -104,7 +104,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(Action, action, path=["response"]) - 
@pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.images.actions.with_raw_response.retrieve( @@ -117,7 +117,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: action = response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.images.actions.with_streaming_response.retrieve( @@ -132,7 +132,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: action = client.gpu_droplets.images.actions.list( @@ -140,7 +140,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.images.actions.with_raw_response.list( @@ -152,7 +152,7 @@ def test_raw_response_list(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.images.actions.with_streaming_response.list( @@ -172,7 +172,7 @@ class TestAsyncActions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.images.actions.create( @@ -181,7 +181,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.actions.with_raw_response.create( @@ -194,7 +194,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) action = await response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.actions.with_streaming_response.create( @@ -209,7 +209,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.images.actions.create( @@ -219,7 +219,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.actions.with_raw_response.create( @@ -233,7 +233,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) action = await response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.actions.with_streaming_response.create( @@ -249,7 +249,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.images.actions.retrieve( @@ -258,7 +258,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.actions.with_raw_response.retrieve( @@ -271,7 +271,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(Action, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.actions.with_streaming_response.retrieve( @@ -286,7 +286,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.images.actions.list( @@ -294,7 +294,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.images.actions.with_raw_response.list( @@ -306,7 +306,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.images.actions.with_streaming_response.list( diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py index 200dad39..e6eefd23 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py @@ -15,7 +15,7 @@ class TestDroplets: parametrize = 
pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_add(self, client: Gradient) -> None: droplet = client.gpu_droplets.load_balancers.droplets.add( @@ -24,7 +24,7 @@ def test_method_add(self, client: Gradient) -> None: ) assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_add(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.droplets.with_raw_response.add( @@ -37,7 +37,7 @@ def test_raw_response_add(self, client: Gradient) -> None: droplet = response.parse() assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_add(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( @@ -52,7 +52,7 @@ def test_streaming_response_add(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_add(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -61,7 +61,7 @@ def test_path_params_add(self, client: Gradient) -> None: droplet_ids=[3164444, 3164445], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_remove(self, client: Gradient) -> None: droplet = client.gpu_droplets.load_balancers.droplets.remove( @@ -70,7 +70,7 @@ def test_method_remove(self, client: Gradient) -> None: ) assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_remove(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( @@ -83,7 +83,7 @@ def test_raw_response_remove(self, client: Gradient) -> None: droplet = response.parse() assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_remove(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( @@ -98,7 +98,7 @@ def test_streaming_response_remove(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_remove(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -113,7 +113,7 @@ class TestAsyncDroplets: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add(self, async_client: AsyncGradient) -> None: droplet = await async_client.gpu_droplets.load_balancers.droplets.add( @@ -122,7 +122,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None: ) assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_add(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add( @@ -135,7 +135,7 @@ async def 
test_raw_response_add(self, async_client: AsyncGradient) -> None: droplet = await response.parse() assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_add(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( @@ -150,7 +150,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_add(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -159,7 +159,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None: droplet_ids=[3164444, 3164445], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_remove(self, async_client: AsyncGradient) -> None: droplet = await async_client.gpu_droplets.load_balancers.droplets.remove( @@ -168,7 +168,7 @@ async def test_method_remove(self, async_client: AsyncGradient) -> None: ) assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( @@ -181,7 +181,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: droplet = await response.parse() assert droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( @@ -196,7 +196,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_remove(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py index 4f1decdf..a3cc0bd1 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py @@ -15,7 +15,7 @@ class TestForwardingRules: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_add(self, client: Gradient) -> None: forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.add( @@ -31,7 +31,7 @@ def test_method_add(self, client: Gradient) -> None: ) assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_add(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( @@ -51,7 +51,7 @@ def test_raw_response_add(self, client: Gradient) -> None: forwarding_rule = response.parse() assert 
forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_add(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( @@ -73,7 +73,7 @@ def test_streaming_response_add(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_add(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -89,7 +89,7 @@ def test_path_params_add(self, client: Gradient) -> None: ], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_remove(self, client: Gradient) -> None: forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.remove( @@ -105,7 +105,7 @@ def test_method_remove(self, client: Gradient) -> None: ) assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_remove(self, client: Gradient) -> None: response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( @@ -125,7 +125,7 @@ def test_raw_response_remove(self, client: Gradient) -> None: forwarding_rule = response.parse() assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_remove(self, client: Gradient) -> None: with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( @@ -147,7 +147,7 @@ def test_streaming_response_remove(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_remove(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -169,7 +169,7 @@ class TestAsyncForwardingRules: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_add(self, async_client: AsyncGradient) -> None: forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.add( @@ -185,7 +185,7 @@ async def test_method_add(self, async_client: AsyncGradient) -> None: ) assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_add(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( @@ -205,7 +205,7 @@ async def test_raw_response_add(self, async_client: AsyncGradient) -> None: forwarding_rule = await response.parse() assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_add(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( @@ -227,7 +227,7 @@ async def test_streaming_response_add(self, async_client: AsyncGradient) -> None assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_path_params_add(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -243,7 +243,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None: ], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_remove(self, async_client: AsyncGradient) -> None: forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.remove( @@ -259,7 +259,7 @@ async def test_method_remove(self, async_client: AsyncGradient) -> None: ) assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( @@ -279,7 +279,7 @@ async def test_raw_response_remove(self, async_client: AsyncGradient) -> None: forwarding_rule = await response.parse() assert forwarding_rule is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_remove(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( @@ -301,7 +301,7 @@ async def test_streaming_response_remove(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_remove(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py index 7a52c608..e514196b 100644 --- a/tests/api_resources/gpu_droplets/test_actions.py +++ b/tests/api_resources/gpu_droplets/test_actions.py @@ -22,7 +22,7 @@ class TestActions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: action = client.gpu_droplets.actions.retrieve( @@ -31,7 +31,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.retrieve( @@ -44,7 +44,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.retrieve( @@ -59,7 +59,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: action = client.gpu_droplets.actions.list( @@ -67,7 +67,7 @@ def test_method_list(self, client: Gradient) -> None: ) 
assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: action = client.gpu_droplets.actions.list( @@ -77,7 +77,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.list( @@ -89,7 +89,7 @@ def test_raw_response_list(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.list( @@ -103,7 +103,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_bulk_initiate_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.actions.bulk_initiate( @@ -111,7 +111,7 @@ def test_method_bulk_initiate_overload_1(self, client: Gradient) -> None: ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_bulk_initiate_with_all_params_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.actions.bulk_initiate( @@ -120,7 +120,7 @@ def test_method_bulk_initiate_with_all_params_overload_1(self, client: Gradient) ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_bulk_initiate_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( @@ -132,7 +132,7 @@ def test_raw_response_bulk_initiate_overload_1(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_bulk_initiate_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( @@ -146,7 +146,7 @@ def test_streaming_response_bulk_initiate_overload_1(self, client: Gradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_bulk_initiate_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.actions.bulk_initiate( @@ -154,7 +154,7 @@ def test_method_bulk_initiate_overload_2(self, client: Gradient) -> None: ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_bulk_initiate_with_all_params_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.actions.bulk_initiate( @@ -164,7 +164,7 @@ def test_method_bulk_initiate_with_all_params_overload_2(self, client: Gradient) ) 
assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_bulk_initiate_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( @@ -176,7 +176,7 @@ def test_raw_response_bulk_initiate_overload_2(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_bulk_initiate_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( @@ -190,7 +190,7 @@ def test_streaming_response_bulk_initiate_overload_2(self, client: Gradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -199,7 +199,7 @@ def test_method_initiate_overload_1(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -212,7 +212,7 @@ def test_raw_response_initiate_overload_1(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -227,7 +227,7 @@ def test_streaming_response_initiate_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -236,7 +236,7 @@ def test_method_initiate_overload_2(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -250,7 +250,7 @@ def test_method_initiate_with_all_params_overload_2(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -263,7 +263,7 @@ def test_raw_response_initiate_overload_2(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -278,7 +278,7 @@ def 
test_streaming_response_initiate_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_3(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -287,7 +287,7 @@ def test_method_initiate_overload_3(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_3(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -301,7 +301,7 @@ def test_method_initiate_with_all_params_overload_3(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_3(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -314,7 +314,7 @@ def test_raw_response_initiate_overload_3(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_3(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -329,7 +329,7 @@ def test_streaming_response_initiate_overload_3(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_4(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -338,7 +338,7 @@ def test_method_initiate_overload_4(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_4(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -348,7 +348,7 @@ def test_method_initiate_with_all_params_overload_4(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_4(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -361,7 +361,7 @@ def test_raw_response_initiate_overload_4(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_4(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -376,7 +376,7 @@ def test_streaming_response_initiate_overload_4(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_5(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -385,7 +385,7 @@ def test_method_initiate_overload_5(self, client: Gradient) -> 
None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_5(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -396,7 +396,7 @@ def test_method_initiate_with_all_params_overload_5(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_5(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -409,7 +409,7 @@ def test_raw_response_initiate_overload_5(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_5(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -424,7 +424,7 @@ def test_streaming_response_initiate_overload_5(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_6(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -433,7 +433,7 @@ def test_method_initiate_overload_6(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_6(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -443,7 +443,7 @@ def test_method_initiate_with_all_params_overload_6(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_6(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -456,7 +456,7 @@ def test_raw_response_initiate_overload_6(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_6(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -471,7 +471,7 @@ def test_streaming_response_initiate_overload_6(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_7(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -480,7 +480,7 @@ def test_method_initiate_overload_7(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_7(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -490,7 +490,7 @@ def test_method_initiate_with_all_params_overload_7(self, client: Gradient) -> N ) 
assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_7(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -503,7 +503,7 @@ def test_raw_response_initiate_overload_7(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_7(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -518,7 +518,7 @@ def test_streaming_response_initiate_overload_7(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_8(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -527,7 +527,7 @@ def test_method_initiate_overload_8(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_8(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -537,7 +537,7 @@ def test_method_initiate_with_all_params_overload_8(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_8(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -550,7 +550,7 @@ def test_raw_response_initiate_overload_8(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_8(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -565,7 +565,7 @@ def test_streaming_response_initiate_overload_8(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_overload_9(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -574,7 +574,7 @@ def test_method_initiate_overload_9(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_with_all_params_overload_9(self, client: Gradient) -> None: action = client.gpu_droplets.actions.initiate( @@ -584,7 +584,7 @@ def test_method_initiate_with_all_params_overload_9(self, client: Gradient) -> N ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_overload_9(self, client: Gradient) -> None: response = client.gpu_droplets.actions.with_raw_response.initiate( @@ -597,7 +597,7 @@ def test_raw_response_initiate_overload_9(self, client: Gradient) -> None: action = 
response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_overload_9(self, client: Gradient) -> None: with client.gpu_droplets.actions.with_streaming_response.initiate( @@ -618,7 +618,7 @@ class TestAsyncActions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.retrieve( @@ -627,7 +627,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.retrieve( @@ -640,7 +640,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.retrieve( @@ -655,7 +655,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.list( @@ -663,7 +663,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.list( @@ -673,7 +673,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.list( @@ -685,7 +685,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.list( @@ -699,7 +699,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: action = await 
async_client.gpu_droplets.actions.bulk_initiate( @@ -707,7 +707,7 @@ async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradient ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( @@ -716,7 +716,7 @@ async def test_method_bulk_initiate_with_all_params_overload_1(self, async_clien ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( @@ -728,7 +728,7 @@ async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGr action = await response.parse() assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( @@ -742,7 +742,7 @@ async def test_streaming_response_bulk_initiate_overload_1(self, async_client: A assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( @@ -750,7 +750,7 @@ async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradient ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.bulk_initiate( @@ -760,7 +760,7 @@ async def test_method_bulk_initiate_with_all_params_overload_2(self, async_clien ) assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( @@ -772,7 +772,7 @@ async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGr action = await response.parse() assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( @@ -786,7 +786,7 @@ async def test_streaming_response_bulk_initiate_overload_2(self, async_client: A assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_1(self, async_client: AsyncGradient) -> None: action = await 
async_client.gpu_droplets.actions.initiate( @@ -795,7 +795,7 @@ async def test_method_initiate_overload_1(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -808,7 +808,7 @@ async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -823,7 +823,7 @@ async def test_streaming_response_initiate_overload_1(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -832,7 +832,7 @@ async def test_method_initiate_overload_2(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -846,7 +846,7 @@ async def test_method_initiate_with_all_params_overload_2(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -859,7 +859,7 @@ async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -874,7 +874,7 @@ async def test_streaming_response_initiate_overload_2(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -883,7 +883,7 @@ async def test_method_initiate_overload_3(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -897,7 +897,7 @@ async def test_method_initiate_with_all_params_overload_3(self, 
async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -910,7 +910,7 @@ async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -925,7 +925,7 @@ async def test_streaming_response_initiate_overload_3(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_4(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -934,7 +934,7 @@ async def test_method_initiate_overload_4(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -944,7 +944,7 @@ async def test_method_initiate_with_all_params_overload_4(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -957,7 +957,7 @@ async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -972,7 +972,7 @@ async def test_streaming_response_initiate_overload_4(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_5(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -981,7 +981,7 @@ async def test_method_initiate_overload_5(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -992,7 +992,7 @@ async def test_method_initiate_with_all_params_overload_5(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -1005,7 +1005,7 @@ async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -1020,7 +1020,7 @@ async def test_streaming_response_initiate_overload_5(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_6(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1029,7 +1029,7 @@ async def test_method_initiate_overload_6(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1039,7 +1039,7 @@ async def test_method_initiate_with_all_params_overload_6(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -1052,7 +1052,7 @@ async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -1067,7 +1067,7 @@ async def test_streaming_response_initiate_overload_6(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_7(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1076,7 +1076,7 @@ async def test_method_initiate_overload_7(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1086,7 +1086,7 @@ async def test_method_initiate_with_all_params_overload_7(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_raw_response_initiate_overload_7(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -1099,7 +1099,7 @@ async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -1114,7 +1114,7 @@ async def test_streaming_response_initiate_overload_7(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_8(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1123,7 +1123,7 @@ async def test_method_initiate_overload_8(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1133,7 +1133,7 @@ async def test_method_initiate_with_all_params_overload_8(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -1146,7 +1146,7 @@ async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( @@ -1161,7 +1161,7 @@ async def test_streaming_response_initiate_overload_8(self, async_client: AsyncG assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_overload_9(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1170,7 +1170,7 @@ async def test_method_initiate_overload_9(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.actions.initiate( @@ -1180,7 +1180,7 @@ async def test_method_initiate_with_all_params_overload_9(self, async_client: As ) assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradient) -> None: response = await 
async_client.gpu_droplets.actions.with_raw_response.initiate( @@ -1193,7 +1193,7 @@ async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradien action = await response.parse() assert_matches_type(ActionInitiateResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.actions.with_streaming_response.initiate( diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py index 16be3e00..cbf67b19 100644 --- a/tests/api_resources/gpu_droplets/test_autoscale.py +++ b/tests/api_resources/gpu_droplets/test_autoscale.py @@ -24,7 +24,7 @@ class TestAutoscale: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.create( @@ -42,7 +42,7 @@ def test_method_create(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.create( @@ -70,7 +70,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.create( @@ -92,7 +92,7 @@ def test_raw_response_create(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.create( @@ -116,7 +116,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.retrieve( @@ -124,7 +124,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.retrieve( @@ -136,7 +136,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.retrieve( @@ -150,7 +150,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) 
is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -158,7 +158,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.update( @@ -174,7 +174,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.update( @@ -197,7 +197,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.update( @@ -217,7 +217,7 @@ def test_raw_response_update(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.update( @@ -239,7 +239,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -255,13 +255,13 @@ def test_path_params_update(self, client: Gradient) -> None: name="my-autoscale-pool", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list( @@ -271,7 +271,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list() @@ -281,7 +281,7 @@ def test_raw_response_list(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.list() as response: @@ -293,7 +293,7 @@ def test_streaming_response_list(self, 
client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.delete( @@ -301,7 +301,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.delete( @@ -313,7 +313,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: autoscale = response.parse() assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.delete( @@ -327,7 +327,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -335,7 +335,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete_dangerous(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.delete_dangerous( @@ -344,7 +344,7 @@ def test_method_delete_dangerous(self, client: Gradient) -> None: ) assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete_dangerous(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( @@ -357,7 +357,7 @@ def test_raw_response_delete_dangerous(self, client: Gradient) -> None: autoscale = response.parse() assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete_dangerous(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( @@ -372,7 +372,7 @@ def test_streaming_response_delete_dangerous(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete_dangerous(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -381,7 +381,7 @@ def test_path_params_delete_dangerous(self, client: Gradient) -> None: x_dangerous=True, ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_history(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_history( @@ -389,7 +389,7 @@ def test_method_list_history(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_history_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_history( @@ -399,7 +399,7 @@ def 
test_method_list_history_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_history(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list_history( @@ -411,7 +411,7 @@ def test_raw_response_list_history(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_history(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.list_history( @@ -425,7 +425,7 @@ def test_streaming_response_list_history(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_history(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -433,7 +433,7 @@ def test_path_params_list_history(self, client: Gradient) -> None: autoscale_pool_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_members(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_members( @@ -441,7 +441,7 @@ def test_method_list_members(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_members_with_all_params(self, client: Gradient) -> None: autoscale = client.gpu_droplets.autoscale.list_members( @@ -451,7 +451,7 @@ def test_method_list_members_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_members(self, client: Gradient) -> None: response = client.gpu_droplets.autoscale.with_raw_response.list_members( @@ -463,7 +463,7 @@ def test_raw_response_list_members(self, client: Gradient) -> None: autoscale = response.parse() assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_members(self, client: Gradient) -> None: with client.gpu_droplets.autoscale.with_streaming_response.list_members( @@ -477,7 +477,7 @@ def test_streaming_response_list_members(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_members(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -491,7 +491,7 @@ class TestAsyncAutoscale: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: autoscale = await 
async_client.gpu_droplets.autoscale.create( @@ -509,7 +509,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.create( @@ -537,7 +537,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.create( @@ -559,7 +559,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: autoscale = await response.parse() assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.create( @@ -583,7 +583,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.retrieve( @@ -591,7 +591,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( @@ -603,7 +603,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: autoscale = await response.parse() assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve( @@ -617,7 +617,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -625,7 +625,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.update( @@ -641,7 +641,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - 
@pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.update( @@ -664,7 +664,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.update( @@ -684,7 +684,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: autoscale = await response.parse() assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.update( @@ -706,7 +706,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -722,13 +722,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: name="my-autoscale-pool", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list( @@ -738,7 +738,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list() @@ -748,7 +748,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: autoscale = await response.parse() assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list() as response: @@ -760,7 +760,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.delete( @@ -768,7 +768,7 @@ async def test_method_delete(self, async_client: AsyncGradient) 
-> None: ) assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.delete( @@ -780,7 +780,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: autoscale = await response.parse() assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.delete( @@ -794,7 +794,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -802,7 +802,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.delete_dangerous( @@ -811,7 +811,7 @@ async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> Non ) assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( @@ -824,7 +824,7 @@ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) autoscale = await response.parse() assert autoscale is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( @@ -839,7 +839,7 @@ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGrad assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete_dangerous(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -848,7 +848,7 @@ async def test_path_params_delete_dangerous(self, async_client: AsyncGradient) - x_dangerous=True, ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_history(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_history( @@ -856,7 +856,7 @@ async def test_method_list_history(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_history_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_history( @@ -866,7 +866,7 @@ async def 
test_method_list_history_with_all_params(self, async_client: AsyncGrad ) assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_history(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list_history( @@ -878,7 +878,7 @@ async def test_raw_response_list_history(self, async_client: AsyncGradient) -> N autoscale = await response.parse() assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_history(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list_history( @@ -892,7 +892,7 @@ async def test_streaming_response_list_history(self, async_client: AsyncGradient assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list_history(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): @@ -900,7 +900,7 @@ async def test_path_params_list_history(self, async_client: AsyncGradient) -> No autoscale_pool_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_members(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_members( @@ -908,7 +908,7 @@ async def test_method_list_members(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_members_with_all_params(self, async_client: AsyncGradient) -> None: autoscale = await async_client.gpu_droplets.autoscale.list_members( @@ -918,7 +918,7 @@ async def test_method_list_members_with_all_params(self, async_client: AsyncGrad ) assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_members(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.autoscale.with_raw_response.list_members( @@ -930,7 +930,7 @@ async def test_raw_response_list_members(self, async_client: AsyncGradient) -> N autoscale = await response.parse() assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_members(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.autoscale.with_streaming_response.list_members( @@ -944,7 +944,7 @@ async def test_streaming_response_list_members(self, async_client: AsyncGradient assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list_members(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): diff --git a/tests/api_resources/gpu_droplets/test_backups.py 
b/tests/api_resources/gpu_droplets/test_backups.py index ecff25de..4a0d36b9 100644 --- a/tests/api_resources/gpu_droplets/test_backups.py +++ b/tests/api_resources/gpu_droplets/test_backups.py @@ -22,7 +22,7 @@ class TestBackups: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list( @@ -30,7 +30,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list( @@ -40,7 +40,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list( @@ -52,7 +52,7 @@ def test_raw_response_list(self, client: Gradient) -> None: backup = response.parse() assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list( @@ -66,13 +66,13 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_policies(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_policies() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_policies_with_all_params(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_policies( @@ -81,7 +81,7 @@ def test_method_list_policies_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_policies(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list_policies() @@ -91,7 +91,7 @@ def test_raw_response_list_policies(self, client: Gradient) -> None: backup = response.parse() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_policies(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list_policies() as response: @@ -103,13 +103,13 @@ def test_streaming_response_list_policies(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_supported_policies(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.list_supported_policies() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) 
- @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_supported_policies(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.list_supported_policies() @@ -119,7 +119,7 @@ def test_raw_response_list_supported_policies(self, client: Gradient) -> None: backup = response.parse() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_supported_policies(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: @@ -131,7 +131,7 @@ def test_streaming_response_list_supported_policies(self, client: Gradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve_policy(self, client: Gradient) -> None: backup = client.gpu_droplets.backups.retrieve_policy( @@ -139,7 +139,7 @@ def test_method_retrieve_policy(self, client: Gradient) -> None: ) assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve_policy(self, client: Gradient) -> None: response = client.gpu_droplets.backups.with_raw_response.retrieve_policy( @@ -151,7 +151,7 @@ def test_raw_response_retrieve_policy(self, client: Gradient) -> None: backup = response.parse() assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve_policy(self, client: Gradient) -> None: with client.gpu_droplets.backups.with_streaming_response.retrieve_policy( @@ -171,7 +171,7 @@ class TestAsyncBackups: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list( @@ -179,7 +179,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list( @@ -189,7 +189,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list( @@ -201,7 +201,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: backup = await response.parse() assert_matches_type(BackupListResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list( @@ -215,13 
+215,13 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_policies(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_policies() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_policies_with_all_params(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_policies( @@ -230,7 +230,7 @@ async def test_method_list_policies_with_all_params(self, async_client: AsyncGra ) assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_policies(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list_policies() @@ -240,7 +240,7 @@ async def test_raw_response_list_policies(self, async_client: AsyncGradient) -> backup = await response.parse() assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_policies(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list_policies() as response: @@ -252,13 +252,13 @@ async def test_streaming_response_list_policies(self, async_client: AsyncGradien assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_supported_policies(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.list_supported_policies() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_supported_policies(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.backups.with_raw_response.list_supported_policies() @@ -268,7 +268,7 @@ async def test_raw_response_list_supported_policies(self, async_client: AsyncGra backup = await response.parse() assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: @@ -280,7 +280,7 @@ async def test_streaming_response_list_supported_policies(self, async_client: As assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None: backup = await async_client.gpu_droplets.backups.retrieve_policy( @@ -288,7 +288,7 @@ async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None ) assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
    @parametrize
    async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
        response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy(
@@ -300,7 +300,7 @@ async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -
         backup = await response.parse()
         assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
index 3715ced7..166206d2 100644
--- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
+++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
@@ -20,7 +20,7 @@ class TestDestroyWithAssociatedResources:
 
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list(
@@ -30,7 +30,7 @@ def test_method_list(self, client: Gradient) -> None:
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
@@ -44,7 +44,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
@@ -60,7 +60,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
    @parametrize
     def test_method_check_status(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status(
@@ -70,7 +70,7 @@ def test_method_check_status(self, client: Gradient) -> None:
             DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_check_status(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
@@ -84,7 +84,7 @@ def test_raw_response_check_status(self, client: Gradient) -> None:
             DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_check_status(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
@@ -100,7 +100,7 @@ def test_streaming_response_check_status(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete_dangerous(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(
@@ -109,7 +109,7 @@ def test_method_delete_dangerous(self, client: Gradient) -> None:
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete_dangerous(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous(
@@ -122,7 +122,7 @@ def test_raw_response_delete_dangerous(self, client: Gradient) -> None:
         destroy_with_associated_resource = response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete_dangerous(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous(
@@ -137,7 +137,7 @@ def test_streaming_response_delete_dangerous(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete_selective(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective(
@@ -145,7 +145,7 @@ def test_method_delete_selective(self, client: Gradient) -> None:
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete_selective_with_all_params(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective(
@@ -158,7 +158,7 @@ def test_method_delete_selective_with_all_params(self, client: Gradient) -> None
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete_selective(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective(
@@ -170,7 +170,7 @@ def test_raw_response_delete_selective(self, client: Gradient) -> None:
         destroy_with_associated_resource = response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete_selective(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective(
@@ -184,7 +184,7 @@ def test_streaming_response_delete_selective(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retry(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry(
@@ -192,7 +192,7 @@ def test_method_retry(self, client: Gradient) -> None:
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retry(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
@@ -204,7 +204,7 @@ def test_raw_response_retry(self, client: Gradient) -> None:
         destroy_with_associated_resource = response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retry(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
@@ -224,7 +224,7 @@ class TestAsyncDestroyWithAssociatedResources:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list(
@@ -234,7 +234,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
@@ -248,7 +248,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
@@ -264,7 +264,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_check_status(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = (
@@ -276,7 +276,7 @@ async def test_method_check_status(self, async_client: AsyncGradient) -> None:
             DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_check_status(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
@@ -290,7 +290,7 @@ async def test_raw_response_check_status(self, async_client: AsyncGradient) -> N
             DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
         )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_check_status(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
@@ -306,7 +306,7 @@ async def test_streaming_response_check_status(self, async_client: AsyncGradient
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = (
@@ -317,7 +317,7 @@ async def test_method_delete_dangerous(self, async_client: AsyncGradient) -> Non
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous(
@@ -330,7 +330,7 @@ async def test_raw_response_delete_dangerous(self, async_client: AsyncGradient)
         destroy_with_associated_resource = await response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous(
@@ -345,7 +345,7 @@ async def test_streaming_response_delete_dangerous(self, async_client: AsyncGrad
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete_selective(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = (
@@ -355,7 +355,7 @@ async def test_method_delete_selective(self, async_client: AsyncGradient) -> Non
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = (
@@ -370,7 +370,7 @@ async def test_method_delete_selective_with_all_params(self, async_client: Async
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete_selective(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective(
@@ -382,7 +382,7 @@ async def test_raw_response_delete_selective(self, async_client: AsyncGradient)
         destroy_with_associated_resource = await response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete_selective(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective(
@@ -396,7 +396,7 @@ async def test_streaming_response_delete_selective(self, async_client: AsyncGrad
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retry(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.retry(
@@ -404,7 +404,7 @@ async def test_method_retry(self, async_client: AsyncGradient) -> None:
         )
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retry(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
@@ -416,7 +416,7 @@ async def test_raw_response_retry(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = await response.parse()
         assert destroy_with_associated_resource is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retry(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py
index 8585a114..83142b93 100644
--- a/tests/api_resources/gpu_droplets/test_firewalls.py
+++ b/tests/api_resources/gpu_droplets/test_firewalls.py
@@ -22,13 +22,13 @@ class TestFirewalls:
 
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.create()
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.create(
@@ -77,7 +77,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.create()
@@ -87,7 +87,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         firewall = response.parse()
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.create() as response:
@@ -99,7 +99,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.retrieve(
@@ -107,7 +107,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.retrieve(
@@ -119,7 +119,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         firewall = response.parse()
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.retrieve(
@@ -133,7 +133,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -141,7 +141,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.update(
@@ -150,7 +150,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.update(
@@ -200,7 +200,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.update(
@@ -213,7 +213,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         firewall = response.parse()
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.update(
@@ -228,7 +228,7 @@ def test_streaming_response_update(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -237,13 +237,13 @@ def test_path_params_update(self, client: Gradient) -> None:
                 firewall={"name": "frontend-firewall"},
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.list()
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.list(
@@ -252,7 +252,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.list()
@@ -262,7 +262,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         firewall = response.parse()
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.list() as response:
@@ -274,7 +274,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.delete(
@@ -282,7 +282,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert firewall is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.delete(
@@ -294,7 +294,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         firewall = response.parse()
         assert firewall is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.delete(
@@ -308,7 +308,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -322,13 +322,13 @@ class TestAsyncFirewalls:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.create()
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.create(
@@ -377,7 +377,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.create()
@@ -387,7 +387,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         firewall = await response.parse()
         assert_matches_type(FirewallCreateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.create() as response:
@@ -399,7 +399,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.retrieve(
@@ -407,7 +407,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve(
@@ -419,7 +419,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         firewall = await response.parse()
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve(
@@ -433,7 +433,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -441,7 +441,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
                 "",
            )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.update(
@@ -450,7 +450,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.update(
@@ -500,7 +500,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.update(
@@ -513,7 +513,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         firewall = await response.parse()
         assert_matches_type(FirewallUpdateResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.update(
@@ -528,7 +528,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
@@ -537,13 +537,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None:
                 firewall={"name": "frontend-firewall"},
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.list()
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.list(
@@ -552,7 +552,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.list()
@@ -562,7 +562,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         firewall = await response.parse()
         assert_matches_type(FirewallListResponse, firewall, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.list() as response:
@@ -574,7 +574,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.delete(
@@ -582,7 +582,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert firewall is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.delete(
@@ -594,7 +594,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         firewall = await response.parse()
         assert firewall is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.delete(
@@ -608,7 +608,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"):
diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py
index 9ac488d6..c252a24a 100644
--- a/tests/api_resources/gpu_droplets/test_floating_ips.py
+++ b/tests/api_resources/gpu_droplets/test_floating_ips.py
@@ -21,7 +21,7 @@ class TestFloatingIPs:
 
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_overload_1(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.create(
@@ -29,7 +29,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None:
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.create(
@@ -41,7 +41,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         floating_ip = response.parse()
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.create(
@@ -55,7 +55,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_overload_2(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.create(
@@ -63,7 +63,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None:
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.create(
@@ -72,7 +72,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.create(
@@ -84,7 +84,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         floating_ip = response.parse()
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.create(
@@ -98,7 +98,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.retrieve(
@@ -106,7 +106,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.retrieve(
@@ -118,7 +118,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         floating_ip = response.parse()
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
@@ -132,7 +132,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
@@ -140,13 +140,13 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.list()
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.list(
@@ -155,7 +155,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.list()
@@ -165,7 +165,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         floating_ip = response.parse()
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.list() as response:
@@ -177,7 +177,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.delete(
@@ -185,7 +185,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert floating_ip is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.delete(
@@ -197,7 +197,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         floating_ip = response.parse()
         assert floating_ip is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.delete(
@@ -211,7 +211,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
@@ -225,7 +225,7 @@ class TestAsyncFloatingIPs:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.create(
@@ -233,7 +233,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.create(
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient)
         floating_ip = await response.parse()
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.create(
@@ -259,7 +259,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.create(
@@ -267,7 +267,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.create(
@@ -276,7 +276,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
         )
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.create(
@@ -288,7 +288,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient)
         floating_ip = await response.parse()
         assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.create(
@@ -302,7 +302,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.retrieve(
@@ -310,7 +310,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve(
@@ -322,7 +322,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         floating_ip = await response.parse()
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
@@ -336,7 +336,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
@@ -344,13 +344,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
                 "",
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.list()
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.list(
@@ -359,7 +359,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.list()
@@ -369,7 +369,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         floating_ip = await response.parse()
         assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.list() as response:
@@ -381,7 +381,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.delete(
@@ -389,7 +389,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert floating_ip is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete(
@@ -401,7 +401,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         floating_ip = await response.parse()
         assert floating_ip is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete(
@@ -415,7 +415,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"):
diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py
index bf6bfa4f..8f81912d 100644
--- a/tests/api_resources/gpu_droplets/test_images.py
+++ b/tests/api_resources/gpu_droplets/test_images.py
@@ -22,13 +22,13 @@ class TestImages:
 
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
        image = client.gpu_droplets.images.create()
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.create(
@@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.create()
@@ -51,7 +51,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         image = response.parse()
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.create() as response:
@@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.retrieve(
@@ -71,7 +71,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(ImageRetrieveResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.retrieve(
@@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         image = response.parse()
         assert_matches_type(ImageRetrieveResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.retrieve(
@@ -97,7 +97,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.update(
@@ -105,7 +105,7 @@ def test_method_update(self, client: Gradient) -> None:
         )
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.update(
@@ -116,7 +116,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.update(
@@ -128,7 +128,7 @@ def test_raw_response_update(self, client: Gradient) -> None:
         image = response.parse()
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.update(
@@ -142,13 +142,13 @@ def test_streaming_response_update(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.list()
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.list(
@@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.list()
@@ -170,7 +170,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         image = response.parse()
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.list() as response:
@@ -182,7 +182,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.delete(
@@ -190,7 +190,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert image is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.delete(
@@ -202,7 +202,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         image = response.parse()
         assert image is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.delete(
@@ -222,13 +222,13 @@ class TestAsyncImages:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.create()
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.create(
@@ -241,7 +241,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.create()
@@ -251,7 +251,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
         image = await response.parse()
         assert_matches_type(ImageCreateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.create() as response:
@@ -263,7 +263,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.retrieve(
@@ -271,7 +271,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(ImageRetrieveResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.retrieve(
@@ -283,7 +283,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         image = await response.parse()
         assert_matches_type(ImageRetrieveResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.retrieve(
@@ -297,7 +297,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.update(
@@ -305,7 +305,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.update(
@@ -316,7 +316,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
         )
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.update(
@@ -328,7 +328,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None:
         image = await response.parse()
         assert_matches_type(ImageUpdateResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.update(
@@ -342,13 +342,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.list()
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.list(
@@ -360,7 +360,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) ->
         )
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.list()
@@ -370,7 +370,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         image = await response.parse()
         assert_matches_type(ImageListResponse, image, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.list() as response:
@@ -382,7 +382,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.delete(
@@ -390,7 +390,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
         )
         assert image is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.delete(
@@ -402,7 +402,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         image = await response.parse()
         assert image is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.delete(
diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py
index f660f8f3..5db3c20b 100644
--- a/tests/api_resources/gpu_droplets/test_load_balancers.py
+++ b/tests/api_resources/gpu_droplets/test_load_balancers.py
@@ -22,7 +22,7 @@ class TestLoadBalancers:
 
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_overload_1(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.create(
@@ -37,7 +37,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.create(
@@ -108,7 +108,7 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.create(
@@ -127,7 +127,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.create(
@@ -148,7 +148,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_overload_2(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.create(
@@ -163,7 +163,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.create(
@@ -234,7 +234,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.create(
@@ -253,7 +253,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.create(
@@ -274,7 +274,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.retrieve(
@@ -282,7 +282,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.retrieve(
@@ -294,7 +294,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.retrieve(
@@ -308,7 +308,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_retrieve(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
@@ -316,7 +316,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
                 "",
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_overload_1(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.update(
@@ -332,7 +332,7 @@ def test_method_update_overload_1(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params_overload_1(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.update(
@@ -404,7 +404,7 @@ def test_method_update_with_all_params_overload_1(self, client: Gradient) -> Non
         )
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update_overload_1(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.update(
@@ -424,7 +424,7 @@ def test_raw_response_update_overload_1(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update_overload_1(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.update(
@@ -446,7 +446,7 @@ def test_streaming_response_update_overload_1(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update_overload_1(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
@@ -462,7 +462,7 @@ def test_path_params_update_overload_1(self, client: Gradient) -> None:
                 ],
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_overload_2(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.update(
@@ -478,7 +478,7 @@ def test_method_update_overload_2(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_update_with_all_params_overload_2(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.update(
@@ -550,7 +550,7 @@ def test_method_update_with_all_params_overload_2(self, client: Gradient) -> Non
         )
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_update_overload_2(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.update(
@@ -570,7 +570,7 @@ def test_raw_response_update_overload_2(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_update_overload_2(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.update(
@@ -592,7 +592,7 @@ def test_streaming_response_update_overload_2(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_update_overload_2(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
@@ -608,13 +608,13 @@ def test_path_params_update_overload_2(self, client: Gradient) -> None:
                 ],
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.list()
         assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_list_with_all_params(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.list(
@@ -623,7 +623,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.list()
@@ -633,7 +633,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.list() as response:
@@ -645,7 +645,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.delete(
@@ -653,7 +653,7 @@ def test_method_delete(self, client: Gradient) -> None:
         )
         assert load_balancer is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.delete(
@@ -665,7 +665,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert load_balancer is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.delete(
@@ -679,7 +679,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
@@ -687,7 +687,7 @@ def test_path_params_delete(self, client: Gradient) -> None:
                 "",
             )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_delete_cache(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.delete_cache(
@@ -695,7 +695,7 @@ def test_method_delete_cache(self, client: Gradient) -> None:
         )
         assert load_balancer is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_delete_cache(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
@@ -707,7 +707,7 @@ def test_raw_response_delete_cache(self, client: Gradient) -> None:
         load_balancer = response.parse()
         assert load_balancer is None
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_delete_cache(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.delete_cache(
@@ -721,7 +721,7 @@ def test_streaming_response_delete_cache(self, client: Gradient) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_path_params_delete_cache(self, client: Gradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"):
@@ -735,7 +735,7 @@ class TestAsyncLoadBalancers:
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
     )
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
         load_balancer = await async_client.gpu_droplets.load_balancers.create(
@@ -750,7 +750,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
         load_balancer = await async_client.gpu_droplets.load_balancers.create(
@@ -821,7 +821,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         )
         assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"])
 
-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def
test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( @@ -840,7 +840,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) load_balancer = await response.parse() assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( @@ -861,7 +861,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( @@ -876,7 +876,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.create( @@ -947,7 +947,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ) assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( @@ -966,7 +966,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) load_balancer = await response.parse() assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( @@ -987,7 +987,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.retrieve( @@ -995,7 +995,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( @@ -1007,7 +1007,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: load_balancer = await response.parse() assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are 
disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve( @@ -1021,7 +1021,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -1029,7 +1029,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( @@ -1045,7 +1045,7 @@ async def test_method_update_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( @@ -1117,7 +1117,7 @@ async def test_method_update_with_all_params_overload_1(self, async_client: Asyn ) assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( @@ -1137,7 +1137,7 @@ async def test_raw_response_update_overload_1(self, async_client: AsyncGradient) load_balancer = await response.parse() assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( @@ -1159,7 +1159,7 @@ async def test_streaming_response_update_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update_overload_1(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -1175,7 +1175,7 @@ async def test_path_params_update_overload_1(self, async_client: AsyncGradient) ], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.update( @@ -1191,7 +1191,7 @@ async def test_method_update_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: load_balancer = await 
async_client.gpu_droplets.load_balancers.update( @@ -1263,7 +1263,7 @@ async def test_method_update_with_all_params_overload_2(self, async_client: Asyn ) assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( @@ -1283,7 +1283,7 @@ async def test_raw_response_update_overload_2(self, async_client: AsyncGradient) load_balancer = await response.parse() assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( @@ -1305,7 +1305,7 @@ async def test_streaming_response_update_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update_overload_2(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -1321,13 +1321,13 @@ async def test_path_params_update_overload_2(self, async_client: AsyncGradient) ], ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.list() assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.list( @@ -1336,7 +1336,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.list() @@ -1346,7 +1346,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: load_balancer = await response.parse() assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.list() as response: @@ -1358,7 +1358,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.delete( @@ -1366,7 +1366,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert load_balancer is None - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete( @@ -1378,7 +1378,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: load_balancer = await response.parse() assert load_balancer is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete( @@ -1392,7 +1392,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): @@ -1400,7 +1400,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete_cache(self, async_client: AsyncGradient) -> None: load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache( @@ -1408,7 +1408,7 @@ async def test_method_delete_cache(self, async_client: AsyncGradient) -> None: ) assert load_balancer is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( @@ -1420,7 +1420,7 @@ async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> N load_balancer = await response.parse() assert load_balancer is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete_cache(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( @@ -1434,7 +1434,7 @@ async def test_streaming_response_delete_cache(self, async_client: AsyncGradient assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete_cache(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py index ec934e9f..7fc4fe80 100644 --- a/tests/api_resources/gpu_droplets/test_sizes.py +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -17,13 +17,13 @@ class TestSizes: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: size = client.gpu_droplets.sizes.list() assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: size = client.gpu_droplets.sizes.list( @@ -32,7 +32,7 @@ def 
test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.sizes.with_raw_response.list() @@ -42,7 +42,7 @@ def test_raw_response_list(self, client: Gradient) -> None: size = response.parse() assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.sizes.with_streaming_response.list() as response: @@ -60,13 +60,13 @@ class TestAsyncSizes: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: size = await async_client.gpu_droplets.sizes.list() assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: size = await async_client.gpu_droplets.sizes.list( @@ -75,7 +75,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.sizes.with_raw_response.list() @@ -85,7 +85,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: size = await response.parse() assert_matches_type(SizeListResponse, size, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.sizes.with_streaming_response.list() as response: diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py index d4574ece..5f8da45a 100644 --- a/tests/api_resources/gpu_droplets/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -17,7 +17,7 @@ class TestSnapshots: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.retrieve( @@ -25,7 +25,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.snapshots.with_raw_response.retrieve( @@ -37,7 +37,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: snapshot = response.parse() assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: 
Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.retrieve( @@ -51,13 +51,13 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.list() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.list( @@ -67,7 +67,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.snapshots.with_raw_response.list() @@ -77,7 +77,7 @@ def test_raw_response_list(self, client: Gradient) -> None: snapshot = response.parse() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.list() as response: @@ -89,7 +89,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: snapshot = client.gpu_droplets.snapshots.delete( @@ -97,7 +97,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.snapshots.with_raw_response.delete( @@ -109,7 +109,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: snapshot = response.parse() assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.snapshots.with_streaming_response.delete( @@ -129,7 +129,7 @@ class TestAsyncSnapshots: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.retrieve( @@ -137,7 +137,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.snapshots.with_raw_response.retrieve( @@ -149,7 +149,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.snapshots.with_streaming_response.retrieve( @@ -163,13 +163,13 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.list() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.list( @@ -179,7 +179,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.snapshots.with_raw_response.list() @@ -189,7 +189,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.snapshots.with_streaming_response.list() as response: @@ -201,7 +201,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.snapshots.delete( @@ -209,7 +209,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.snapshots.with_raw_response.delete( @@ -221,7 +221,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.snapshots.with_streaming_response.delete( diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py index 49436220..f9b3778c 100644 --- a/tests/api_resources/gpu_droplets/test_volumes.py +++ b/tests/api_resources/gpu_droplets/test_volumes.py @@ -21,7 +21,7 @@ class TestVolumes: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.create( @@ -31,7 +31,7 
@@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.create( @@ -46,7 +46,7 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.create( @@ -60,7 +60,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: volume = response.parse() assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.create( @@ -76,7 +76,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_2(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.create( @@ -86,7 +86,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None: ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.create( @@ -101,7 +101,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.create( @@ -115,7 +115,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: volume = response.parse() assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.create( @@ -131,7 +131,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.retrieve( @@ -139,7 +139,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.retrieve( @@ -151,7 +151,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: volume = response.parse() 
assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.retrieve( @@ -165,7 +165,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -173,13 +173,13 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.list() assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.list( @@ -190,7 +190,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.list() @@ -200,7 +200,7 @@ def test_raw_response_list(self, client: Gradient) -> None: volume = response.parse() assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.list() as response: @@ -212,7 +212,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.delete( @@ -220,7 +220,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.delete( @@ -232,7 +232,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: volume = response.parse() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.delete( @@ -246,7 +246,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -254,13 +254,13 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are 
disabled") @parametrize def test_method_delete_by_name(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.delete_by_name() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete_by_name_with_all_params(self, client: Gradient) -> None: volume = client.gpu_droplets.volumes.delete_by_name( @@ -269,7 +269,7 @@ def test_method_delete_by_name_with_all_params(self, client: Gradient) -> None: ) assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete_by_name(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.with_raw_response.delete_by_name() @@ -279,7 +279,7 @@ def test_raw_response_delete_by_name(self, client: Gradient) -> None: volume = response.parse() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete_by_name(self, client: Gradient) -> None: with client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: @@ -297,7 +297,7 @@ class TestAsyncVolumes: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.create( @@ -307,7 +307,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.create( @@ -322,7 +322,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.create( @@ -336,7 +336,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) volume = await response.parse() assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.with_streaming_response.create( @@ -352,7 +352,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.create( @@ -362,7 +362,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: 
AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.create( @@ -377,7 +377,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ) assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.create( @@ -391,7 +391,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) volume = await response.parse() assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.with_streaming_response.create( @@ -407,7 +407,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.retrieve( @@ -415,7 +415,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve( @@ -427,7 +427,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: volume = await response.parse() assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve( @@ -441,7 +441,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -449,13 +449,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.list() assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.list( @@ -466,7 +466,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.list() @@ -476,7 +476,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: volume = await response.parse() assert_matches_type(VolumeListResponse, volume, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.with_streaming_response.list() as response: @@ -488,7 +488,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.delete( @@ -496,7 +496,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.delete( @@ -508,7 +508,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: volume = await response.parse() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.with_streaming_response.delete( @@ -522,7 +522,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -530,13 +530,13 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete_by_name(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.delete_by_name() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradient) -> None: volume = await async_client.gpu_droplets.volumes.delete_by_name( @@ -545,7 +545,7 @@ async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGr ) assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete_by_name(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.with_raw_response.delete_by_name() @@ -555,7 +555,7 @@ async def test_raw_response_delete_by_name(self, async_client: AsyncGradient) -> volume = await response.parse() assert volume is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete_by_name(self, async_client: AsyncGradient) -> None: async with 
async_client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py index 19088e9e..7159db48 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_actions.py +++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py @@ -22,7 +22,7 @@ class TestActions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.retrieve( @@ -31,7 +31,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve_with_all_params(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.retrieve( @@ -42,7 +42,7 @@ def test_method_retrieve_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.retrieve( @@ -55,7 +55,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( @@ -70,7 +70,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -79,7 +79,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.list( @@ -87,7 +87,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.list( @@ -97,7 +97,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.list( @@ -109,7 +109,7 @@ def test_raw_response_list(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
@parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.list( @@ -123,7 +123,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -131,7 +131,7 @@ def test_path_params_list(self, client: Gradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -141,7 +141,7 @@ def test_method_initiate_by_id_overload_1(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_with_all_params_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -155,7 +155,7 @@ def test_method_initiate_by_id_with_all_params_overload_1(self, client: Gradient ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_by_id_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -169,7 +169,7 @@ def test_raw_response_initiate_by_id_overload_1(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_by_id_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -185,7 +185,7 @@ def test_streaming_response_initiate_by_id_overload_1(self, client: Gradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_initiate_by_id_overload_1(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -195,7 +195,7 @@ def test_path_params_initiate_by_id_overload_1(self, client: Gradient) -> None: type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -205,7 +205,7 @@ def test_method_initiate_by_id_overload_2(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_with_all_params_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -218,7 +218,7 @@ def test_method_initiate_by_id_with_all_params_overload_2(self, client: Gradient ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_by_id_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -232,7 +232,7 @@ def test_raw_response_initiate_by_id_overload_2(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_by_id_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -248,7 +248,7 @@ def test_streaming_response_initiate_by_id_overload_2(self, client: Gradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_initiate_by_id_overload_2(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -258,7 +258,7 @@ def test_path_params_initiate_by_id_overload_2(self, client: Gradient) -> None: type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_overload_3(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -268,7 +268,7 @@ def test_method_initiate_by_id_overload_3(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_id_with_all_params_overload_3(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_id( @@ -281,7 +281,7 @@ def test_method_initiate_by_id_with_all_params_overload_3(self, client: Gradient ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_by_id_overload_3(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -295,7 +295,7 @@ def test_raw_response_initiate_by_id_overload_3(self, client: Gradient) -> None: action = response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_by_id_overload_3(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -311,7 +311,7 @@ def test_streaming_response_initiate_by_id_overload_3(self, client: Gradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_initiate_by_id_overload_3(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -321,7 +321,7 @@ def test_path_params_initiate_by_id_overload_3(self, client: Gradient) -> None: type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_name_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_name( @@ -330,7 +330,7 @@ def 
test_method_initiate_by_name_overload_1(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_name_with_all_params_overload_1(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_name( @@ -343,7 +343,7 @@ def test_method_initiate_by_name_with_all_params_overload_1(self, client: Gradie ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_by_name_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( @@ -356,7 +356,7 @@ def test_raw_response_initiate_by_name_overload_1(self, client: Gradient) -> Non action = response.parse() assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_by_name_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( @@ -371,7 +371,7 @@ def test_streaming_response_initiate_by_name_overload_1(self, client: Gradient) assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_name_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_name( @@ -380,7 +380,7 @@ def test_method_initiate_by_name_overload_2(self, client: Gradient) -> None: ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_initiate_by_name_with_all_params_overload_2(self, client: Gradient) -> None: action = client.gpu_droplets.volumes.actions.initiate_by_name( @@ -392,7 +392,7 @@ def test_method_initiate_by_name_with_all_params_overload_2(self, client: Gradie ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_initiate_by_name_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( @@ -405,7 +405,7 @@ def test_raw_response_initiate_by_name_overload_2(self, client: Gradient) -> Non action = response.parse() assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_initiate_by_name_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( @@ -426,7 +426,7 @@ class TestAsyncActions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.retrieve( @@ -435,7 +435,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) 
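# The hunks above and below make one mechanical change: every bare
# `@pytest.mark.skip()` marker gains an explicit reason. With a reason attached,
# pytest can report *why* each test was skipped. A minimal standalone sketch of
# the difference (hypothetical test names, not part of this patch):
#
#     import pytest
#
#     @pytest.mark.skip()  # summary shows only "skipped"
#     def test_bare_marker() -> None: ...
#
#     @pytest.mark.skip(reason="Prism tests are disabled")
#     def test_marker_with_reason() -> None: ...  # reason is printed by `pytest -rs`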
- @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.retrieve( @@ -446,7 +446,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient ) assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve( @@ -459,7 +459,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( @@ -474,7 +474,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -483,7 +483,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.list( @@ -491,7 +491,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.list( @@ -501,7 +501,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.list( @@ -513,7 +513,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: action = await response.parse() assert_matches_type(ActionListResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.list( @@ -527,7 +527,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value 
for `volume_id` but received ''"): @@ -535,7 +535,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -545,7 +545,7 @@ async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradien ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -559,7 +559,7 @@ async def test_method_initiate_by_id_with_all_params_overload_1(self, async_clie ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -573,7 +573,7 @@ async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncG action = await response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -589,7 +589,7 @@ async def test_streaming_response_initiate_by_id_overload_1(self, async_client: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -599,7 +599,7 @@ async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGr type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -609,7 +609,7 @@ async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradien ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -622,7 +622,7 @@ async def test_method_initiate_by_id_with_all_params_overload_2(self, async_clie ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -636,7 +636,7 @@ async def 
test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncG action = await response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -652,7 +652,7 @@ async def test_streaming_response_initiate_by_id_overload_2(self, async_client: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -662,7 +662,7 @@ async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGr type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -672,7 +672,7 @@ async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradien ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( @@ -685,7 +685,7 @@ async def test_method_initiate_by_id_with_all_params_overload_3(self, async_clie ) assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( @@ -699,7 +699,7 @@ async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncG action = await response.parse() assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( @@ -715,7 +715,7 @@ async def test_streaming_response_initiate_by_id_overload_3(self, async_client: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -725,7 +725,7 @@ async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGr type="attach", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( @@ -734,7 +734,7 @@ async def 
test_method_initiate_by_name_overload_1(self, async_client: AsyncGradi ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( @@ -747,7 +747,7 @@ async def test_method_initiate_by_name_with_all_params_overload_1(self, async_cl ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( @@ -760,7 +760,7 @@ async def test_raw_response_initiate_by_name_overload_1(self, async_client: Asyn action = await response.parse() assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( @@ -775,7 +775,7 @@ async def test_streaming_response_initiate_by_name_overload_1(self, async_client assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( @@ -784,7 +784,7 @@ async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradi ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( @@ -796,7 +796,7 @@ async def test_method_initiate_by_name_with_all_params_overload_2(self, async_cl ) assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( @@ -809,7 +809,7 @@ async def test_raw_response_initiate_by_name_overload_2(self, async_client: Asyn action = await response.parse() assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py index 5037c7bb..e3450001 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py @@ -21,7 +21,7 @@ class TestSnapshots: 
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.create( @@ -30,7 +30,7 @@ def test_method_create(self, client: Gradient) -> None: ) assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.create( @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.snapshots.with_raw_response.create( @@ -53,7 +53,7 @@ def test_raw_response_create(self, client: Gradient) -> None: snapshot = response.parse() assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.gpu_droplets.volumes.snapshots.with_streaming_response.create( @@ -68,7 +68,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_create(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -77,7 +77,7 @@ def test_path_params_create(self, client: Gradient) -> None: name="big-data-snapshot1475261774", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.retrieve( @@ -85,7 +85,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( @@ -97,7 +97,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: snapshot = response.parse() assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( @@ -111,7 +111,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): @@ -119,7 +119,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
@parametrize def test_method_list(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.list( @@ -127,7 +127,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.list( @@ -137,7 +137,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.snapshots.with_raw_response.list( @@ -149,7 +149,7 @@ def test_raw_response_list(self, client: Gradient) -> None: snapshot = response.parse() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.volumes.snapshots.with_streaming_response.list( @@ -163,7 +163,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -171,7 +171,7 @@ def test_path_params_list(self, client: Gradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: snapshot = client.gpu_droplets.volumes.snapshots.delete( @@ -179,7 +179,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete( @@ -191,7 +191,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: snapshot = response.parse() assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( @@ -205,7 +205,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): @@ -219,7 +219,7 @@ class TestAsyncSnapshots: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.create( @@ -228,7 +228,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: ) 
assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.create( @@ -238,7 +238,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create( @@ -251,7 +251,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.create( @@ -266,7 +266,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_create(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -275,7 +275,7 @@ async def test_path_params_create(self, async_client: AsyncGradient) -> None: name="big-data-snapshot1475261774", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve( @@ -283,7 +283,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( @@ -295,7 +295,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( @@ -309,7 +309,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): @@ -317,7 +317,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_method_list(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.list( @@ -325,7 +325,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.list( @@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list( @@ -347,7 +347,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.list( @@ -361,7 +361,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): @@ -369,7 +369,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None: volume_id="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: snapshot = await async_client.gpu_droplets.volumes.snapshots.delete( @@ -377,7 +377,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete( @@ -389,7 +389,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: snapshot = await response.parse() assert snapshot is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( @@ -403,7 +403,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index f22947ed..d9745710 100644 --- 
a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -23,13 +23,13 @@ class TestAPIKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: api_key = client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: api_key = client.inference.api_keys.create( @@ -37,7 +37,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.create() @@ -47,7 +47,7 @@ def test_raw_response_create(self, client: Gradient) -> None: api_key = response.parse() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.create() as response: @@ -59,7 +59,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: api_key = client.inference.api_keys.update( @@ -67,7 +67,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: api_key = client.inference.api_keys.update( @@ -77,7 +77,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.update( @@ -89,7 +89,7 @@ def test_raw_response_update(self, client: Gradient) -> None: api_key = response.parse() assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.update( @@ -103,7 +103,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -111,13 +111,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_method_list(self, client: Gradient) -> None: api_key = client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: api_key = client.inference.api_keys.list( @@ -126,7 +126,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.list() @@ -136,7 +136,7 @@ def test_raw_response_list(self, client: Gradient) -> None: api_key = response.parse() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.list() as response: @@ -148,7 +148,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: api_key = client.inference.api_keys.delete( @@ -156,7 +156,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.delete( @@ -168,7 +168,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: api_key = response.parse() assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.delete( @@ -182,7 +182,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -190,7 +190,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_regenerate(self, client: Gradient) -> None: api_key = client.inference.api_keys.update_regenerate( @@ -198,7 +198,7 @@ def test_method_update_regenerate(self, client: Gradient) -> None: ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update_regenerate(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.update_regenerate( @@ -210,7 +210,7 @@ def test_raw_response_update_regenerate(self, client: Gradient) -> None: api_key = response.parse() assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update_regenerate(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.update_regenerate( @@ -224,7 +224,7 @@ def test_streaming_response_update_regenerate(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update_regenerate(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -238,13 +238,13 @@ class TestAsyncAPIKeys: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.create( @@ -252,7 +252,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.create() @@ -262,7 +262,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: api_key = await response.parse() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.create() as response: @@ -274,7 +274,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update( @@ -282,7 +282,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update( @@ -292,7 +292,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.update( @@ -304,7 +304,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: api_key = await response.parse() assert_matches_type(APIKeyUpdateResponse, 
api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.update( @@ -318,7 +318,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -326,13 +326,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.list( @@ -341,7 +341,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.list() @@ -351,7 +351,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: api_key = await response.parse() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.list() as response: @@ -363,7 +363,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.delete( @@ -371,7 +371,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.delete( @@ -383,7 +383,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: api_key = await response.parse() assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.delete( @@ -397,7 +397,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, 
response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -405,7 +405,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_regenerate(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update_regenerate( @@ -413,7 +413,7 @@ async def test_method_update_regenerate(self, async_client: AsyncGradient) -> No ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.update_regenerate( @@ -425,7 +425,7 @@ async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) api_key = await response.parse() assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update_regenerate(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.update_regenerate( @@ -439,7 +439,7 @@ async def test_streaming_response_update_regenerate(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update_regenerate(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 0e44b584..bd7158d2 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -21,7 +21,7 @@ class TestDataSources: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.create( @@ -29,7 +29,7 @@ def test_method_create(self, client: Gradient) -> None: ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.create( @@ -55,7 +55,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.create( @@ -67,7 +67,7 @@ def test_raw_response_create(self, client: Gradient) -> None: data_source = response.parse() 
assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.knowledge_bases.data_sources.with_streaming_response.create( @@ -81,7 +81,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_create(self, client: Gradient) -> None: with pytest.raises( @@ -91,7 +91,7 @@ def test_path_params_create(self, client: Gradient) -> None: path_knowledge_base_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.list( @@ -99,7 +99,7 @@ def test_method_list(self, client: Gradient) -> None: ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.list( @@ -109,7 +109,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.list( @@ -121,7 +121,7 @@ def test_raw_response_list(self, client: Gradient) -> None: data_source = response.parse() assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.knowledge_bases.data_sources.with_streaming_response.list( @@ -135,7 +135,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): @@ -143,7 +143,7 @@ def test_path_params_list(self, client: Gradient) -> None: knowledge_base_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: data_source = client.knowledge_bases.data_sources.delete( @@ -152,7 +152,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.knowledge_bases.data_sources.with_raw_response.delete( @@ -165,7 +165,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: data_source = response.parse() assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with 
client.knowledge_bases.data_sources.with_streaming_response.delete( @@ -180,7 +180,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): @@ -201,7 +201,7 @@ class TestAsyncDataSources: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.create( @@ -209,7 +209,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None: ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.create( @@ -235,7 +235,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.create( @@ -247,7 +247,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: data_source = await response.parse() assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.create( @@ -261,7 +261,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_create(self, async_client: AsyncGradient) -> None: with pytest.raises( @@ -271,7 +271,7 @@ async def test_path_params_create(self, async_client: AsyncGradient) -> None: path_knowledge_base_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.list( @@ -279,7 +279,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None: ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.list( @@ -289,7 +289,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
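# Each resource in this patch is exercised three ways: the plain method call,
# `.with_raw_response` (whose `.parse()` yields the typed response model), and
# `.with_streaming_response` (a context manager that closes the underlying
# stream on exit). A condensed sketch reconstructed from the context lines
# above, assuming a configured `client`:
#
#     from typing import Any, cast
#
#     raw = client.knowledge_bases.indexing_jobs.with_raw_response.list()
#     indexing_job = raw.parse()  # typed IndexingJobListResponse
#
#     with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
#         indexing_job = response.parse()
#     assert cast(Any, response.is_closed) is True  # closed once the block exits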
@parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.list( @@ -301,7 +301,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: data_source = await response.parse() assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.list( @@ -315,7 +315,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_list(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): @@ -323,7 +323,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None: knowledge_base_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: data_source = await async_client.knowledge_bases.data_sources.delete( @@ -332,7 +332,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( @@ -345,7 +345,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: data_source = await response.parse() assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( @@ -360,7 +360,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 231b22af..8840edfe 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -23,13 +23,13 @@ class TestIndexingJobs: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") 
@parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create( @@ -38,7 +38,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.create() @@ -48,7 +48,7 @@ def test_raw_response_create(self, client: Gradient) -> None: indexing_job = response.parse() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: @@ -60,7 +60,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve( @@ -68,7 +68,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( @@ -80,7 +80,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: indexing_job = response.parse() assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( @@ -94,7 +94,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -102,13 +102,13 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.list( @@ -117,7 +117,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.list() @@ -127,7 +127,7 @@ def 
test_raw_response_list(self, client: Gradient) -> None: indexing_job = response.parse() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: @@ -139,7 +139,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve_data_sources(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( @@ -147,7 +147,7 @@ def test_method_retrieve_data_sources(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( @@ -159,7 +159,7 @@ def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None: indexing_job = response.parse() assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve_data_sources(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( @@ -173,7 +173,7 @@ def test_streaming_response_retrieve_data_sources(self, client: Gradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve_data_sources(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): @@ -181,7 +181,7 @@ def test_path_params_retrieve_data_sources(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_cancel(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( @@ -189,7 +189,7 @@ def test_method_update_cancel(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_cancel_with_all_params(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( @@ -198,7 +198,7 @@ def test_method_update_cancel_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update_cancel(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( @@ -210,7 +210,7 @@ def test_raw_response_update_cancel(self, client: Gradient) -> None: indexing_job = response.parse() assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are 
disabled") @parametrize def test_streaming_response_update_cancel(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( @@ -224,7 +224,7 @@ def test_streaming_response_update_cancel(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update_cancel(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -238,13 +238,13 @@ class TestAsyncIndexingJobs: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create( @@ -253,7 +253,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() @@ -263,7 +263,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: indexing_job = await response.parse() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: @@ -275,7 +275,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( @@ -283,7 +283,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( @@ -295,7 +295,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: indexing_job = await response.parse() assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( @@ 
-309,7 +309,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -317,13 +317,13 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.list( @@ -332,7 +332,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() @@ -342,7 +342,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: indexing_job = await response.parse() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: @@ -354,7 +354,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( @@ -362,7 +362,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) - ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( @@ -374,7 +374,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi indexing_job = await response.parse() assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( @@ -388,7 +388,7 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn assert cast(Any, response.is_closed) is True - 
@pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
@@ -396,7 +396,7 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie
                 "",
             )

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_cancel(self, async_client: AsyncGradient) -> None:
         indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
@@ -404,7 +404,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradient) -> None:
         indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
@@ -413,7 +413,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra
         )
         assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_update_cancel(self, async_client: AsyncGradient) -> None:
         response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
@@ -425,7 +425,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradient) ->
         indexing_job = await response.parse()
         assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_update_cancel(self, async_client: AsyncGradient) -> None:
         async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
@@ -439,7 +439,7 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_update_cancel(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
index 5bb7a1e9..60cb0c16 100644
--- a/tests/api_resources/models/providers/test_anthropic.py
+++ b/tests/api_resources/models/providers/test_anthropic.py
@@ -24,13 +24,13 @@ class TestAnthropic:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         anthropic = client.models.providers.anthropic.create()
         assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         anthropic = client.models.providers.anthropic.create(
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(AnthropicCreateResponse, anthropic,
path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.create() @@ -49,7 +49,7 @@ def test_raw_response_create(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.create() as response: @@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.retrieve( @@ -69,7 +69,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.retrieve( @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.retrieve( @@ -95,7 +95,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -103,7 +103,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.update( @@ -111,7 +111,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.update( @@ -122,7 +122,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.update( @@ -134,7 +134,7 @@ def test_raw_response_update(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_streaming_response_update(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.update( @@ -148,7 +148,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -156,13 +156,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list( @@ -171,7 +171,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.list() @@ -181,7 +181,7 @@ def test_raw_response_list(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.list() as response: @@ -193,7 +193,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.delete( @@ -201,7 +201,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.delete( @@ -213,7 +213,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.delete( @@ -227,7 +227,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -235,7 +235,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list_agents( @@ -243,7 +243,7 @@ def test_method_list_agents(self, client: Gradient) -> None: ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_agents_with_all_params(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.list_agents( @@ -253,7 +253,7 @@ def test_method_list_agents_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_agents(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.list_agents( @@ -265,7 +265,7 @@ def test_raw_response_list_agents(self, client: Gradient) -> None: anthropic = response.parse() assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_agents(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.list_agents( @@ -279,7 +279,7 @@ def test_streaming_response_list_agents(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_list_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -293,13 +293,13 @@ class TestAsyncAnthropic: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.create() assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.create( @@ -308,7 +308,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.create() @@ -318,7 +318,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: anthropic = await response.parse() assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.create() as response: @@ -330,7 +330,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, 
response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.retrieve( @@ -338,7 +338,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.retrieve( @@ -350,7 +350,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: anthropic = await response.parse() assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.retrieve( @@ -364,7 +364,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -372,7 +372,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.update( @@ -380,7 +380,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.update( @@ -391,7 +391,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.update( @@ -403,7 +403,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: anthropic = await response.parse() assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.update( @@ -417,7 +417,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -425,13 +425,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.list() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.list( @@ -440,7 +440,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list() @@ -450,7 +450,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: anthropic = await response.parse() assert_matches_type(AnthropicListResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list() as response: @@ -462,7 +462,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.delete( @@ -470,7 +470,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.delete( @@ -482,7 +482,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: anthropic = await response.parse() assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.delete( @@ -496,7 +496,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -504,7 +504,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_method_list_agents(self, async_client: AsyncGradient) -> None:
         anthropic = await async_client.models.providers.anthropic.list_agents(
@@ -512,7 +512,7 @@ async def test_method_list_agents(self, async_client: AsyncGradient) -> None:
         )
         assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_method_list_agents_with_all_params(self, async_client: AsyncGradient) -> None:
         anthropic = await async_client.models.providers.anthropic.list_agents(
@@ -522,7 +522,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi
         )
         assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> None:
         response = await async_client.models.providers.anthropic.with_raw_response.list_agents(
@@ -534,7 +534,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradient) -> No
         anthropic = await response.parse()
         assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_list_agents(self, async_client: AsyncGradient) -> None:
         async with async_client.models.providers.anthropic.with_streaming_response.list_agents(
@@ -548,7 +548,7 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradient)

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_list_agents(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
index ed2cfc8e..8f9c1f80 100644
--- a/tests/api_resources/models/providers/test_openai.py
+++ b/tests/api_resources/models/providers/test_openai.py
@@ -24,13 +24,13 @@ class TestOpenAI:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         openai = client.models.providers.openai.create()
         assert_matches_type(OpenAICreateResponse, openai, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         openai = client.models.providers.openai.create(
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(OpenAICreateResponse, openai, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.models.providers.openai.with_raw_response.create()
@@ -49,7 +49,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         openai = response.parse()
         assert_matches_type(OpenAICreateResponse, openai, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with
client.models.providers.openai.with_streaming_response.create() as response: @@ -61,7 +61,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve( @@ -69,7 +69,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.retrieve( @@ -81,7 +81,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: openai = response.parse() assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.retrieve( @@ -95,7 +95,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -103,7 +103,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: openai = client.models.providers.openai.update( @@ -111,7 +111,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.update( @@ -122,7 +122,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.update( @@ -134,7 +134,7 @@ def test_raw_response_update(self, client: Gradient) -> None: openai = response.parse() assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.update( @@ -148,7 +148,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -156,13 +156,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: openai = client.models.providers.openai.list() assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.list( @@ -171,7 +171,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.list() @@ -181,7 +181,7 @@ def test_raw_response_list(self, client: Gradient) -> None: openai = response.parse() assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.list() as response: @@ -193,7 +193,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: openai = client.models.providers.openai.delete( @@ -201,7 +201,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.delete( @@ -213,7 +213,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: openai = response.parse() assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.delete( @@ -227,7 +227,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -235,7 +235,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve_agents(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve_agents( @@ -243,7 +243,7 @@ def test_method_retrieve_agents(self, client: Gradient) -> None: ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve_agents_with_all_params(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve_agents( @@ -253,7 +253,7 @@ def test_method_retrieve_agents_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(OpenAIRetrieveAgentsResponse, 
openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve_agents(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.retrieve_agents( @@ -265,7 +265,7 @@ def test_raw_response_retrieve_agents(self, client: Gradient) -> None: openai = response.parse() assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve_agents(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.retrieve_agents( @@ -279,7 +279,7 @@ def test_streaming_response_retrieve_agents(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve_agents(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -293,13 +293,13 @@ class TestAsyncOpenAI: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.create() assert_matches_type(OpenAICreateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.create( @@ -308,7 +308,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.create() @@ -318,7 +318,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: openai = await response.parse() assert_matches_type(OpenAICreateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.create() as response: @@ -330,7 +330,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve( @@ -338,7 +338,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve( @@ -350,7 +350,7 @@ async def test_raw_response_retrieve(self, async_client: 
AsyncGradient) -> None: openai = await response.parse() assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve( @@ -364,7 +364,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -372,7 +372,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.update( @@ -380,7 +380,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.update( @@ -391,7 +391,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.update( @@ -403,7 +403,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: openai = await response.parse() assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.update( @@ -417,7 +417,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): @@ -425,13 +425,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_api_key_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.list() assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.list( @@ -440,7 +440,7 @@ async def test_method_list_with_all_params(self, 
async_client: AsyncGradient) -> ) assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.list() @@ -450,7 +450,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: openai = await response.parse() assert_matches_type(OpenAIListResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.list() as response: @@ -462,7 +462,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.delete( @@ -470,7 +470,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.delete( @@ -482,7 +482,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: openai = await response.parse() assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.delete( @@ -496,7 +496,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): @@ -504,7 +504,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve_agents( @@ -512,7 +512,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradient) -> None ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve_agents( @@ -522,7 +522,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_raw_response_retrieve_agents(self, async_client: AsyncGradient) -> None:
         response = await async_client.models.providers.openai.with_raw_response.retrieve_agents(
@@ -534,7 +534,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradient) -
         openai = await response.parse()
         assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradient) -> None:
         async with async_client.models.providers.openai.with_streaming_response.retrieve_agents(
@@ -548,7 +548,7 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradi

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     async def test_path_params_retrieve_agents(self, async_client: AsyncGradient) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 77825f7e..dd4dbdc4 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -24,13 +24,13 @@ class TestAgents:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create(self, client: Gradient) -> None:
         agent = client.agents.create()
         assert_matches_type(AgentCreateResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_create_with_all_params(self, client: Gradient) -> None:
         agent = client.agents.create(
@@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentCreateResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_create(self, client: Gradient) -> None:
         response = client.agents.with_raw_response.create()
@@ -57,7 +57,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
         agent = response.parse()
         assert_matches_type(AgentCreateResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_streaming_response_create(self, client: Gradient) -> None:
         with client.agents.with_streaming_response.create() as response:
@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:

         assert cast(Any, response.is_closed) is True

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         agent = client.agents.retrieve(
@@ -77,7 +77,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
         )
         assert_matches_type(AgentRetrieveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.with_raw_response.retrieve(
@@ -89,7 +89,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
         agent = response.parse()
         assert_matches_type(AgentRetrieveResponse, agent, path=["response"])

-    @pytest.mark.skip()
+    @pytest.mark.skip(reason="Prism tests are disabled")
     @parametrize
     def
test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.with_streaming_response.retrieve( @@ -103,7 +103,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -111,7 +111,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: agent = client.agents.update( @@ -119,7 +119,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: agent = client.agents.update( @@ -144,7 +144,7 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.agents.with_raw_response.update( @@ -156,7 +156,7 @@ def test_raw_response_update(self, client: Gradient) -> None: agent = response.parse() assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.agents.with_streaming_response.update( @@ -170,7 +170,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -178,13 +178,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: agent = client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: agent = client.agents.list( @@ -194,7 +194,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.agents.with_raw_response.list() @@ -204,7 +204,7 @@ def test_raw_response_list(self, client: Gradient) -> None: agent = response.parse() assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.agents.with_streaming_response.list() as response: @@ -216,7 +216,7 @@ def 
test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: agent = client.agents.delete( @@ -224,7 +224,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.with_raw_response.delete( @@ -236,7 +236,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: agent = response.parse() assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.with_streaming_response.delete( @@ -250,7 +250,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -258,7 +258,7 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_status(self, client: Gradient) -> None: agent = client.agents.update_status( @@ -266,7 +266,7 @@ def test_method_update_status(self, client: Gradient) -> None: ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_status_with_all_params(self, client: Gradient) -> None: agent = client.agents.update_status( @@ -276,7 +276,7 @@ def test_method_update_status_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update_status(self, client: Gradient) -> None: response = client.agents.with_raw_response.update_status( @@ -288,7 +288,7 @@ def test_raw_response_update_status(self, client: Gradient) -> None: agent = response.parse() assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update_status(self, client: Gradient) -> None: with client.agents.with_streaming_response.update_status( @@ -302,7 +302,7 @@ def test_streaming_response_update_status(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update_status(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -316,13 +316,13 @@ class TestAsyncAgents: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: 
agent = await async_client.agents.create() assert_matches_type(AgentCreateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.create( @@ -339,7 +339,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.create() @@ -349,7 +349,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: agent = await response.parse() assert_matches_type(AgentCreateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.create() as response: @@ -361,7 +361,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.retrieve( @@ -369,7 +369,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.retrieve( @@ -381,7 +381,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: agent = await response.parse() assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.retrieve( @@ -395,7 +395,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -403,7 +403,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update( @@ -411,7 +411,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update( @@ -436,7 +436,7 @@ async def 
test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.update( @@ -448,7 +448,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: agent = await response.parse() assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.update( @@ -462,7 +462,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -470,13 +470,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.list() assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.list( @@ -486,7 +486,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.list() @@ -496,7 +496,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: agent = await response.parse() assert_matches_type(AgentListResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.list() as response: @@ -508,7 +508,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.delete( @@ -516,7 +516,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.delete( @@ -528,7 +528,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: agent = await response.parse() 
assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.delete( @@ -542,7 +542,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -550,7 +550,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_status(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update_status( @@ -558,7 +558,7 @@ async def test_method_update_status(self, async_client: AsyncGradient) -> None: ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.update_status( @@ -568,7 +568,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncGra ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.update_status( @@ -580,7 +580,7 @@ async def test_raw_response_update_status(self, async_client: AsyncGradient) -> agent = await response.parse() assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.update_status( @@ -594,7 +594,7 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update_status(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py index 485fd5f9..0cb27fbb 100644 --- a/tests/api_resources/test_gpu_droplets.py +++ b/tests/api_resources/test_gpu_droplets.py @@ -25,7 +25,7 @@ class TestGPUDroplets: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_1(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( @@ -35,7 +35,7 @@ def test_method_create_overload_1(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are 
disabled") @parametrize def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( @@ -61,7 +61,7 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_1(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.create( @@ -75,7 +75,7 @@ def test_raw_response_create_overload_1(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_1(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.create( @@ -91,7 +91,7 @@ def test_streaming_response_create_overload_1(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_overload_2(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( @@ -101,7 +101,7 @@ def test_method_create_overload_2(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.create( @@ -127,7 +127,7 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create_overload_2(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.create( @@ -141,7 +141,7 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create_overload_2(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.create( @@ -157,7 +157,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.retrieve( @@ -165,7 +165,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.retrieve( @@ -177,7 +177,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.retrieve( @@ -191,13 +191,13 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list() assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list( @@ -209,7 +209,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list() @@ -219,7 +219,7 @@ def test_raw_response_list(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list() as response: @@ -231,7 +231,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.delete( @@ -239,7 +239,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.delete( @@ -251,7 +251,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: gpu_droplet = response.parse() assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.delete( @@ -265,7 +265,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete_by_tag(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.delete_by_tag( @@ -273,7 +273,7 @@ def test_method_delete_by_tag(self, client: Gradient) -> None: ) assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete_by_tag(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.delete_by_tag( @@ -285,7 +285,7 @@ def test_raw_response_delete_by_tag(self, client: Gradient) -> None: gpu_droplet = response.parse() assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_delete_by_tag(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.delete_by_tag( @@ -299,7 
+299,7 @@ def test_streaming_response_delete_by_tag(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_firewalls(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_firewalls( @@ -307,7 +307,7 @@ def test_method_list_firewalls(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_firewalls_with_all_params(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_firewalls( @@ -317,7 +317,7 @@ def test_method_list_firewalls_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_firewalls(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list_firewalls( @@ -329,7 +329,7 @@ def test_raw_response_list_firewalls(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_firewalls(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list_firewalls( @@ -343,7 +343,7 @@ def test_streaming_response_list_firewalls(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_kernels(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_kernels( @@ -351,7 +351,7 @@ def test_method_list_kernels(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_kernels_with_all_params(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_kernels( @@ -361,7 +361,7 @@ def test_method_list_kernels_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_kernels(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list_kernels( @@ -373,7 +373,7 @@ def test_raw_response_list_kernels(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_kernels(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list_kernels( @@ -387,7 +387,7 @@ def test_streaming_response_list_kernels(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_neighbors(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_neighbors( @@ -395,7 +395,7 @@ def test_method_list_neighbors(self, client: Gradient) -> 
None: ) assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_neighbors(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list_neighbors( @@ -407,7 +407,7 @@ def test_raw_response_list_neighbors(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_neighbors(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list_neighbors( @@ -421,7 +421,7 @@ def test_streaming_response_list_neighbors(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_snapshots(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_snapshots( @@ -429,7 +429,7 @@ def test_method_list_snapshots(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_snapshots_with_all_params(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_snapshots( @@ -439,7 +439,7 @@ def test_method_list_snapshots_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list_snapshots(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list_snapshots( @@ -451,7 +451,7 @@ def test_raw_response_list_snapshots(self, client: Gradient) -> None: gpu_droplet = response.parse() assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list_snapshots(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list_snapshots( @@ -471,7 +471,7 @@ class TestAsyncGPUDroplets: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.create( @@ -481,7 +481,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> No ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.create( @@ -507,7 +507,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: response = await 
async_client.gpu_droplets.with_raw_response.create( @@ -521,7 +521,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) gpu_droplet = await response.parse() assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.create( @@ -537,7 +537,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.create( @@ -547,7 +547,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> No ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.create( @@ -573,7 +573,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn ) assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.create( @@ -587,7 +587,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) gpu_droplet = await response.parse() assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.create( @@ -603,7 +603,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.retrieve( @@ -611,7 +611,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.retrieve( @@ -623,7 +623,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: gpu_droplet = await response.parse() assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.retrieve( @@ -637,13 +637,13 @@ async def test_streaming_response_retrieve(self, 
async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list() assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list( @@ -655,7 +655,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list() @@ -665,7 +665,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: gpu_droplet = await response.parse() assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list() as response: @@ -677,7 +677,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.delete( @@ -685,7 +685,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.delete( @@ -697,7 +697,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: gpu_droplet = await response.parse() assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.delete( @@ -711,7 +711,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete_by_tag(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.delete_by_tag( @@ -719,7 +719,7 @@ async def test_method_delete_by_tag(self, async_client: AsyncGradient) -> None: ) assert gpu_droplet is None - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete_by_tag(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.delete_by_tag( @@ -731,7 +731,7 @@ async def test_raw_response_delete_by_tag(self, async_client: AsyncGradient) -> gpu_droplet = await response.parse() assert gpu_droplet is None - @pytest.mark.skip() + 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.delete_by_tag( @@ -745,7 +745,7 @@ async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradien assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_firewalls(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_firewalls( @@ -753,7 +753,7 @@ async def test_method_list_firewalls(self, async_client: AsyncGradient) -> None: ) assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_firewalls( @@ -763,7 +763,7 @@ async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGr ) assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_firewalls(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list_firewalls( @@ -775,7 +775,7 @@ async def test_raw_response_list_firewalls(self, async_client: AsyncGradient) -> gpu_droplet = await response.parse() assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_firewalls(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list_firewalls( @@ -789,7 +789,7 @@ async def test_streaming_response_list_firewalls(self, async_client: AsyncGradie assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_kernels(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_kernels( @@ -797,7 +797,7 @@ async def test_method_list_kernels(self, async_client: AsyncGradient) -> None: ) assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_kernels( @@ -807,7 +807,7 @@ async def test_method_list_kernels_with_all_params(self, async_client: AsyncGrad ) assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_kernels(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list_kernels( @@ -819,7 +819,7 @@ async def test_raw_response_list_kernels(self, async_client: AsyncGradient) -> N gpu_droplet = await response.parse() assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def 
test_streaming_response_list_kernels(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list_kernels( @@ -833,7 +833,7 @@ async def test_streaming_response_list_kernels(self, async_client: AsyncGradient assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_neighbors( @@ -841,7 +841,7 @@ async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None: ) assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list_neighbors( @@ -853,7 +853,7 @@ async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> gpu_droplet = await response.parse() assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_neighbors(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list_neighbors( @@ -867,7 +867,7 @@ async def test_streaming_response_list_neighbors(self, async_client: AsyncGradie assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_snapshots(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_snapshots( @@ -875,7 +875,7 @@ async def test_method_list_snapshots(self, async_client: AsyncGradient) -> None: ) assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_snapshots( @@ -885,7 +885,7 @@ async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGr ) assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list_snapshots(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list_snapshots( @@ -897,7 +897,7 @@ async def test_raw_response_list_snapshots(self, async_client: AsyncGradient) -> gpu_droplet = await response.parse() assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list_snapshots(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list_snapshots( diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 23945480..82698131 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -23,13 +23,13 @@ class TestKnowledgeBases: parametrize = pytest.mark.parametrize("client", [False, True], 
indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.create() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_create_with_all_params(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.create( @@ -76,7 +76,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_create(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.create() @@ -86,7 +86,7 @@ def test_raw_response_create(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_create(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.create() as response: @@ -98,7 +98,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_retrieve(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.retrieve( @@ -106,7 +106,7 @@ def test_method_retrieve(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.retrieve( @@ -118,7 +118,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.retrieve( @@ -132,7 +132,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_retrieve(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -140,7 +140,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.update( @@ -148,7 +148,7 @@ def test_method_update(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_with_all_params(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.update( @@ -162,7 +162,7 @@ def 
test_method_update_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_update(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.update( @@ -174,7 +174,7 @@ def test_raw_response_update(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_update(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.update( @@ -188,7 +188,7 @@ def test_streaming_response_update(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_update(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -196,13 +196,13 @@ def test_path_params_update(self, client: Gradient) -> None: path_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.list() assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.list( @@ -211,7 +211,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.list() @@ -221,7 +221,7 @@ def test_raw_response_list(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.list() as response: @@ -233,7 +233,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_delete(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.delete( @@ -241,7 +241,7 @@ def test_method_delete(self, client: Gradient) -> None: ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.delete( @@ -253,7 +253,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: knowledge_base = response.parse() assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def 
test_streaming_response_delete(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.delete( @@ -267,7 +267,7 @@ def test_streaming_response_delete(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_path_params_delete(self, client: Gradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -281,13 +281,13 @@ class TestAsyncKnowledgeBases: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.create() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.create( @@ -334,7 +334,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_create(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.create() @@ -344,7 +344,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.create() as response: @@ -356,7 +356,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.retrieve( @@ -364,7 +364,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.retrieve( @@ -376,7 +376,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.retrieve( @@ -390,7 +390,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> assert cast(Any, response.is_closed) is True - 
@pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): @@ -398,7 +398,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: "", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.update( @@ -406,7 +406,7 @@ async def test_method_update(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.update( @@ -420,7 +420,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_update(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.update( @@ -432,7 +432,7 @@ async def test_raw_response_update(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_update(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.update( @@ -446,7 +446,7 @@ async def test_streaming_response_update(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_update(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): @@ -454,13 +454,13 @@ async def test_path_params_update(self, async_client: AsyncGradient) -> None: path_uuid="", ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.list() assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.list( @@ -469,7 +469,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.list() @@ -479,7 +479,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() 
assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.list() as response: @@ -491,7 +491,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.delete( @@ -499,7 +499,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.delete( @@ -511,7 +511,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: knowledge_base = await response.parse() assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.delete( @@ -525,7 +525,7 @@ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_path_params_delete(self, async_client: AsyncGradient) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 7b2a5a4a..8e6edaef 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -17,13 +17,13 @@ class TestModels: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: model = client.models.list( @@ -34,7 +34,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.models.with_raw_response.list() @@ -44,7 +44,7 @@ def test_raw_response_list(self, client: Gradient) -> None: model = response.parse() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.models.with_streaming_response.list() as response: @@ -62,13 +62,13 @@ class 
TestAsyncModels: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: model = await async_client.models.list( @@ -79,7 +79,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.models.with_raw_response.list() @@ -89,7 +89,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: model = await response.parse() assert_matches_type(ModelListResponse, model, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.models.with_streaming_response.list() as response: diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 5bf67e91..8cbf6afb 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -17,13 +17,13 @@ class TestRegions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list(self, client: Gradient) -> None: region = client.regions.list() assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_list_with_all_params(self, client: Gradient) -> None: region = client.regions.list( @@ -32,7 +32,7 @@ def test_method_list_with_all_params(self, client: Gradient) -> None: ) assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_raw_response_list(self, client: Gradient) -> None: response = client.regions.with_raw_response.list() @@ -42,7 +42,7 @@ def test_raw_response_list(self, client: Gradient) -> None: region = response.parse() assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_streaming_response_list(self, client: Gradient) -> None: with client.regions.with_streaming_response.list() as response: @@ -60,13 +60,13 @@ class TestAsyncRegions: "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list(self, async_client: AsyncGradient) -> None: region = await async_client.regions.list() assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_list_with_all_params(self, async_client: 
AsyncGradient) -> None: region = await async_client.regions.list( @@ -75,7 +75,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> ) assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_raw_response_list(self, async_client: AsyncGradient) -> None: response = await async_client.regions.with_raw_response.list() @@ -85,7 +85,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None: region = await response.parse() assert_matches_type(RegionListResponse, region, path=["response"]) - @pytest.mark.skip() + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: async with async_client.regions.with_streaming_response.list() as response: From 5619442ffa31122be13027846529555d7782a423 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 16:58:18 +0000 Subject: [PATCH 153/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5e212f31..2ce88448 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.3" + ".": "3.0.0-beta.4" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index e069fc01..e6d2ae78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.3" +version = "3.0.0-beta.4" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 483c7ac9..428a5fa9 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
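(Aside on the skip-marker sweep running through the test files above: giving pytest.mark.skip an explicit reason string makes the "Prism tests are disabled" note appear in pytest's skip summary instead of a bare "unconditional skip". A minimal, SDK-independent sketch:

    import pytest

    @pytest.mark.skip(reason="Prism tests are disabled")
    def test_placeholder() -> None:
        raise AssertionError("never executed")

Running pytest -rs then reports the reason line for each skipped test, which is the only behavioral difference from the bare marker.)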
__title__ = "gradient" -__version__ = "3.0.0-beta.3" # x-release-please-version +__version__ = "3.0.0-beta.4" # x-release-please-version From 6384052ba4173985fb6e94a79245a05102129faf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 07:23:46 +0000 Subject: [PATCH 154/200] chore: update github action --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 05a89405..82ae9514 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: run: ./scripts/lint build: - if: github.repository == 'stainless-sdks/gradient-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork timeout-minutes: 10 name: build permissions: @@ -61,12 +61,14 @@ jobs: run: rye build - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/gradient-python' id: github-oidc uses: actions/github-script@v6 with: script: core.setOutput('github_token', await core.getIDToken()); - name: Upload tarball + if: github.repository == 'stainless-sdks/gradient-python' env: URL: https://pkg.stainless.com/s AUTH: ${{ steps.github-oidc.outputs.github_token }} From fb4281bd4ba8585800963d4c3a0929f2c7c06382 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 14:42:56 +0000 Subject: [PATCH 155/200] feat(api): manual updates --- .stats.yml | 4 ++-- src/gradient/_client.py | 6 ++++++ src/gradient/resources/agents/chat/completions.py | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index 7b81dd11..a2ee5ad3 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9ecd7b6ec2c5f4b9dddde78a975eba587128529bcd7cea1fc82e041ad9450420.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 99e3cd5dde0beb796f4547410869f726 +config_hash: 868a2e440ec27146f18b1327daa1cd63 diff --git a/src/gradient/_client.py b/src/gradient/_client.py index c745eeec..37c539f9 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -95,6 +95,7 @@ def __init__( - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT` """ if access_token is None: access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") @@ -108,6 +109,8 @@ def __init__( agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_access_key = agent_access_key + if agent_endpoint is None: + agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT") self.agent_endpoint = agent_endpoint self.inference_endpoint = inference_endpoint @@ -356,6 +359,7 @@ def __init__( - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT` """ if access_token is None: access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") @@ -369,6 +373,8 @@ def 
__init__( agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_access_key = agent_access_key + if agent_endpoint is None: + agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT") self.agent_endpoint = agent_endpoint self.inference_endpoint = inference_endpoint diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py index 0ff797bf..d86631df 100644 --- a/src/gradient/resources/agents/chat/completions.py +++ b/src/gradient/resources/agents/chat/completions.py @@ -463,7 +463,7 @@ def create( return self._post( "/chat/completions?agent=true" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions?agent=true", + else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true", body=maybe_transform( { "messages": messages, @@ -936,7 +936,7 @@ async def create( return await self._post( "/chat/completions?agent=true" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions?agent=true", + else f"{self._client.agent_endpoint}/api/v1/chat/completions?agent=true", body=await async_maybe_transform( { "messages": messages, From 2053c0d8fabfcaa04aa6dc83194d1d507e6266d2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 22 Aug 2025 15:17:06 +0000 Subject: [PATCH 156/200] feat(api): manual updates --- .stats.yml | 4 ++-- src/gradient/_client.py | 6 ++++++ src/gradient/resources/chat/completions.py | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.stats.yml b/.stats.yml index a2ee5ad3..4a621094 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9ecd7b6ec2c5f4b9dddde78a975eba587128529bcd7cea1fc82e041ad9450420.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 868a2e440ec27146f18b1327daa1cd63 +config_hash: 6c8d569b60ae6536708a165b72ff838f diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 37c539f9..5343a6eb 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -96,6 +96,7 @@ def __init__( - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT` + - `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT` """ if access_token is None: access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") @@ -113,6 +114,8 @@ def __init__( agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT") self.agent_endpoint = agent_endpoint + if inference_endpoint is None: + inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "inference.do-ai.run" self.inference_endpoint = inference_endpoint if base_url is None: @@ -360,6 +363,7 @@ def __init__( - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` - `agent_endpoint` from `GRADIENT_AGENT_ENDPOINT` + - `inference_endpoint` from `GRADIENT_INFERENCE_ENDPOINT` """ if access_token is None: access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") @@ -377,6 +381,8 @@ def __init__( agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT") self.agent_endpoint = agent_endpoint + if inference_endpoint is None: + 
inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "inference.do-ai.run" self.inference_endpoint = inference_endpoint if base_url is None: diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py index 4dc98fa5..d988ac29 100644 --- a/src/gradient/resources/chat/completions.py +++ b/src/gradient/resources/chat/completions.py @@ -463,7 +463,7 @@ def create( return self._post( "/chat/completions" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + else f"{self._client.inference_endpoint}/v1/chat/completions", body=maybe_transform( { "messages": messages, @@ -936,7 +936,7 @@ async def create( return await self._post( "/chat/completions" if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + else f"{self._client.inference_endpoint}/v1/chat/completions", body=await async_maybe_transform( { "messages": messages, From 2a802c9fb3cb38ccabf8c4ac62ab8bccb4a93af7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 05:51:48 +0000 Subject: [PATCH 157/200] chore(internal): change ci workflow machines --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82ae9514..ec9a4813 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: permissions: contents: read id-token: write - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/gradient-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 From a899536170be7e726217e3a8e4dd66115b28dbfc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:25:10 +0000 Subject: [PATCH 158/200] fix: avoid newer type syntax --- src/gradient/_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gradient/_models.py b/src/gradient/_models.py index b8387ce9..92f7c10b 100644 --- a/src/gradient/_models.py +++ b/src/gradient/_models.py @@ -304,7 +304,7 @@ def model_dump( exclude_none=exclude_none, ) - return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped @override def model_dump_json( From d5c283abf1f01cdc92c54c2d130cdaba6afecacc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:39:14 +0000 Subject: [PATCH 159/200] chore(internal): update pyright exclude list --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index e6d2ae78..595bb661 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,6 +148,7 @@ exclude = [ "_dev", ".venv", ".nox", + ".git", ] reportImplicitOverride = true From 08673e57ee3bed01217aa5c33c887cd0d36fdf6d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 30 Aug 2025 04:25:40 +0000 Subject: [PATCH 160/200] chore(internal): add Sequence related utils --- src/gradient/_types.py | 36 ++++++++++++++++++++++++++++++++- src/gradient/_utils/__init__.py | 1 + src/gradient/_utils/_typing.py | 5 +++++ tests/utils.py | 10 ++++++++- 4 files changed, 50 insertions(+), 2 deletions(-) diff --git a/src/gradient/_types.py b/src/gradient/_types.py index 
b44bb2d9..32375713 100644 --- a/src/gradient/_types.py +++ b/src/gradient/_types.py @@ -13,10 +13,21 @@ Mapping, TypeVar, Callable, + Iterator, Optional, Sequence, ) -from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable +from typing_extensions import ( + Set, + Literal, + Protocol, + TypeAlias, + TypedDict, + SupportsIndex, + overload, + override, + runtime_checkable, +) import httpx import pydantic @@ -217,3 +228,26 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth follow_redirects: bool + + +_T_co = TypeVar("_T_co", covariant=True) + + +if TYPE_CHECKING: + # This works because str.__contains__ does not accept object (either in typeshed or at runtime) + # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... + def __contains__(self, value: object, /) -> bool: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T_co]: ... + def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ... + def count(self, value: Any, /) -> int: ... + def __reversed__(self) -> Iterator[_T_co]: ... +else: + # just point this to a normal `Sequence` at runtime to avoid having to special case + # deserializing our custom sequence type + SequenceNotStr = Sequence diff --git a/src/gradient/_utils/__init__.py b/src/gradient/_utils/__init__.py index d4fda26f..ca547ce5 100644 --- a/src/gradient/_utils/__init__.py +++ b/src/gradient/_utils/__init__.py @@ -38,6 +38,7 @@ extract_type_arg as extract_type_arg, is_iterable_type as is_iterable_type, is_required_type as is_required_type, + is_sequence_type as is_sequence_type, is_annotated_type as is_annotated_type, is_type_alias_type as is_type_alias_type, strip_annotated_type as strip_annotated_type, diff --git a/src/gradient/_utils/_typing.py b/src/gradient/_utils/_typing.py index 1bac9542..845cd6b2 100644 --- a/src/gradient/_utils/_typing.py +++ b/src/gradient/_utils/_typing.py @@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool: return (get_origin(typ) or typ) == list +def is_sequence_type(typ: type) -> bool: + origin = get_origin(typ) or typ + return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence + + def is_iterable_type(typ: type) -> bool: """If the given type is `typing.Iterable[T]`""" origin = get_origin(typ) or typ diff --git a/tests/utils.py b/tests/utils.py index e150f00b..ac014538 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -4,7 +4,7 @@ import inspect import traceback import contextlib -from typing import Any, TypeVar, Iterator, cast +from typing import Any, TypeVar, Iterator, Sequence, cast from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type @@ -15,6 +15,7 @@ is_list_type, is_union_type, extract_type_arg, + is_sequence_type, is_annotated_type, is_type_alias_type, ) @@ -71,6 +72,13 @@ def assert_matches_type( if is_list_type(type_): return _assert_list_type(type_, value) + if is_sequence_type(type_): + assert isinstance(value, Sequence) + inner_type = get_args(type_)[0] + for entry in value: # type: ignore + assert_type(inner_type, entry) # type: ignore + return + if origin == str: assert isinstance(value, str) elif origin == int: From 
35482fb3be01373ec2fb720ba232e3e6529d4564 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 03:51:06 +0000 Subject: [PATCH 161/200] feat(types): replace List[str] with SequenceNotStr in params --- src/gradient/_utils/_transform.py | 6 +++ src/gradient/resources/agents/agents.py | 16 +++---- .../resources/agents/chat/completions.py | 20 ++++---- .../evaluation_metrics/workspaces/agents.py | 8 ++-- .../workspaces/workspaces.py | 8 ++-- .../resources/agents/evaluation_runs.py | 8 ++-- .../resources/agents/evaluation_test_cases.py | 8 ++-- src/gradient/resources/chat/completions.py | 20 ++++---- .../destroy_with_associated_resources.py | 24 +++++----- .../resources/gpu_droplets/firewalls/tags.py | 12 ++--- .../resources/gpu_droplets/gpu_droplets.py | 48 +++++++++---------- .../resources/gpu_droplets/images/images.py | 8 ++-- .../load_balancers/load_balancers.py | 28 +++++------ .../resources/gpu_droplets/volumes/actions.py | 20 ++++---- .../gpu_droplets/volumes/snapshots.py | 8 ++-- .../resources/gpu_droplets/volumes/volumes.py | 16 +++---- .../knowledge_bases/indexing_jobs.py | 8 ++-- .../knowledge_bases/knowledge_bases.py | 12 ++--- src/gradient/types/agent_create_params.py | 6 +-- src/gradient/types/agent_update_params.py | 4 +- .../agents/chat/completion_create_params.py | 14 +++--- .../workspace_create_params.py | 5 +- .../workspaces/agent_move_params.py | 4 +- .../agents/evaluation_run_create_params.py | 5 +- .../evaluation_test_case_create_params.py | 4 +- .../evaluation_test_case_update_params.py | 4 +- .../types/chat/completion_create_params.py | 14 +++--- .../types/gpu_droplet_create_params.py | 17 +++---- .../autoscale_pool_droplet_template_param.py | 7 +-- ...ciated_resource_delete_selective_params.py | 13 ++--- .../types/gpu_droplets/firewall_param.py | 5 +- .../gpu_droplets/firewalls/tag_add_params.py | 6 ++- .../firewalls/tag_remove_params.py | 6 ++- .../types/gpu_droplets/image_create_params.py | 6 ++- .../types/gpu_droplets/lb_firewall_param.py | 7 +-- .../load_balancer_create_params.py | 7 +-- .../load_balancer_update_params.py | 7 +-- .../gpu_droplets/volume_create_params.py | 8 ++-- .../volumes/action_initiate_by_id_params.py | 6 ++- .../volumes/action_initiate_by_name_params.py | 6 ++- .../volumes/snapshot_create_params.py | 6 ++- .../types/knowledge_base_create_params.py | 5 +- .../types/knowledge_base_update_params.py | 4 +- .../indexing_job_create_params.py | 5 +- .../shared_params/firewall_rule_target.py | 12 +++-- 45 files changed, 247 insertions(+), 224 deletions(-) diff --git a/src/gradient/_utils/_transform.py b/src/gradient/_utils/_transform.py index b0cc20a7..f0bcefd4 100644 --- a/src/gradient/_utils/_transform.py +++ b/src/gradient/_utils/_transform.py @@ -16,6 +16,7 @@ lru_cache, is_mapping, is_iterable, + is_sequence, ) from .._files import is_base64_file_input from ._typing import ( @@ -24,6 +25,7 @@ extract_type_arg, is_iterable_type, is_required_type, + is_sequence_type, is_annotated_type, strip_annotated_type, ) @@ -184,6 +186,8 @@ def _transform_recursive( (is_list_type(stripped_type) and is_list(data)) # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) ): # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually # intended as an iterable, so we don't transform it. 
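A short illustration of the Sequence handling this transform branch relies on: typing.Sequence[T] normalizes to collections.abc.Sequence under get_origin(), and str is itself a runtime Sequence, which is exactly why the branch pairs the type check with a not isinstance(data, str) guard. Standard-library sketch only, not the SDK's actual helpers:

    import collections.abc
    from typing import Sequence, get_args, get_origin

    def looks_like_sequence(typ: object) -> bool:
        # Sequence[T] from typing and collections.abc both normalize here.
        origin = get_origin(typ) or typ
        return origin is collections.abc.Sequence

    assert looks_like_sequence(Sequence[str])
    assert get_args(Sequence[str]) == (str,)
    # str would satisfy a bare Sequence check, hence the explicit guard:
    assert isinstance("abc", collections.abc.Sequence)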
@@ -346,6 +350,8 @@ async def _async_transform_recursive( (is_list_type(stripped_type) and is_list(data)) # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) ): # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually # intended as an iterable, so we don't transform it. diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py index 67f7f4ae..8d06584c 100644 --- a/src/gradient/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -2,8 +2,6 @@ from __future__ import annotations -from typing import List - import httpx from .routes import ( @@ -22,7 +20,7 @@ agent_update_params, agent_update_status_params, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from .api_keys import ( APIKeysResource, @@ -183,13 +181,13 @@ def create( anthropic_key_uuid: str | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: SequenceNotStr[str] | NotGiven = NOT_GIVEN, model_uuid: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, openai_key_uuid: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -313,7 +311,7 @@ def update( project_id: str | NotGiven = NOT_GIVEN, provide_citations: bool | NotGiven = NOT_GIVEN, retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, top_p: float | NotGiven = NOT_GIVEN, body_uuid: str | NotGiven = NOT_GIVEN, @@ -626,13 +624,13 @@ async def create( anthropic_key_uuid: str | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: SequenceNotStr[str] | NotGiven = NOT_GIVEN, model_uuid: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, openai_key_uuid: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -756,7 +754,7 @@ async def update( project_id: str | NotGiven = NOT_GIVEN, provide_citations: bool | NotGiven = NOT_GIVEN, retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, top_p: float | NotGiven = NOT_GIVEN, body_uuid: str | NotGiven = NOT_GIVEN, diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py index d86631df..0d134389 100644 --- a/src/gradient/resources/agents/chat/completions.py +++ b/src/gradient/resources/agents/chat/completions.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, overload import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -60,7 +60,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -189,7 +189,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -317,7 +317,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -444,7 +444,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -533,7 +533,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: 
Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -662,7 +662,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -790,7 +790,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -917,7 +917,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py index 1a73bc60..408396b1 100644 --- a/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py @@ -2,11 +2,9 @@ from __future__ import annotations -from typing import List - import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -104,7 +102,7 @@ def move( self, path_workspace_uuid: str, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, body_workspace_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
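In practice the widened move() signature accepts any non-string sequence of UUIDs. A hedged usage sketch, assuming a configured client and that the resource is exposed as client.agents.evaluation_metrics.workspaces.agents (inferred from the file path above; the UUIDs are placeholders):

    from gradient import Gradient

    client = Gradient()  # credentials resolved from environment variables

    # A tuple now type-checks where List[str] was previously required.
    client.agents.evaluation_metrics.workspaces.agents.move(
        path_workspace_uuid="11111111-2222-3333-4444-555555555555",
        agent_uuids=("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",),
    )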
@@ -232,7 +230,7 @@ async def move( self, path_workspace_uuid: str, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, body_workspace_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py index a2cf5ebc..e6f610ef 100644 --- a/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -2,8 +2,6 @@ from __future__ import annotations -from typing import List - import httpx from .agents import ( @@ -14,7 +12,7 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -65,7 +63,7 @@ def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse: def create( self, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -327,7 +325,7 @@ def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingRespons async def create( self, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. diff --git a/src/gradient/resources/agents/evaluation_runs.py b/src/gradient/resources/agents/evaluation_runs.py index e55cc275..e00c9eb3 100644 --- a/src/gradient/resources/agents/evaluation_runs.py +++ b/src/gradient/resources/agents/evaluation_runs.py @@ -2,11 +2,9 @@ from __future__ import annotations -from typing import List - import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -49,7 +47,7 @@ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse def create( self, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, run_name: str | NotGiven = NOT_GIVEN, test_case_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -251,7 +249,7 @@ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingRes async def create( self, *, - agent_uuids: List[str] | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, run_name: str | NotGiven = NOT_GIVEN, test_case_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. diff --git a/src/gradient/resources/agents/evaluation_test_cases.py b/src/gradient/resources/agents/evaluation_test_cases.py index 454576c8..07f0a251 100644 --- a/src/gradient/resources/agents/evaluation_test_cases.py +++ b/src/gradient/resources/agents/evaluation_test_cases.py @@ -2,11 +2,9 @@ from __future__ import annotations -from typing import List - import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -60,7 +58,7 @@ def create( *, dataset_uuid: str | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, - metrics: List[str] | NotGiven = NOT_GIVEN, + metrics: SequenceNotStr[str] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, workspace_uuid: str | NotGiven = NOT_GIVEN, @@ -322,7 +320,7 @@ async def create( *, dataset_uuid: str | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, - metrics: List[str] | NotGiven = NOT_GIVEN, + metrics: SequenceNotStr[str] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, workspace_uuid: str | NotGiven = NOT_GIVEN, diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py index d988ac29..80c3d550 100644 --- a/src/gradient/resources/chat/completions.py +++ b/src/gradient/resources/chat/completions.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, overload import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -60,7 +60,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -189,7 +189,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = 
NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -317,7 +317,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -444,7 +444,7 @@ def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -533,7 +533,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -662,7 +662,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -790,7 +790,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -917,7 +917,7 @@ async def create( metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, diff --git 
a/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py index 2f3b90cf..0d55cb48 100644 --- a/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py +++ b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py @@ -2,11 +2,9 @@ from __future__ import annotations -from typing import List - import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -174,11 +172,11 @@ def delete_selective( self, droplet_id: int, *, - floating_ips: List[str] | NotGiven = NOT_GIVEN, - reserved_ips: List[str] | NotGiven = NOT_GIVEN, - snapshots: List[str] | NotGiven = NOT_GIVEN, - volume_snapshots: List[str] | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + floating_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + reserved_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + volume_snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -430,11 +428,11 @@ async def delete_selective( self, droplet_id: int, *, - floating_ips: List[str] | NotGiven = NOT_GIVEN, - reserved_ips: List[str] | NotGiven = NOT_GIVEN, - snapshots: List[str] | NotGiven = NOT_GIVEN, - volume_snapshots: List[str] | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + floating_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + reserved_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + volume_snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/gradient/resources/gpu_droplets/firewalls/tags.py b/src/gradient/resources/gpu_droplets/firewalls/tags.py index dc66c72f..ee13acd5 100644 --- a/src/gradient/resources/gpu_droplets/firewalls/tags.py +++ b/src/gradient/resources/gpu_droplets/firewalls/tags.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -46,7 +46,7 @@ def add( self, firewall_id: str, *, - tags: Optional[List[str]], + tags: Optional[SequenceNotStr[str]], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -95,7 +95,7 @@ def remove( self, firewall_id: str, *, - tags: Optional[List[str]], + tags: Optional[SequenceNotStr[str]], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -165,7 +165,7 @@ async def add( self, firewall_id: str, *, - tags: Optional[List[str]], + tags: Optional[SequenceNotStr[str]], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -214,7 +214,7 @@ async def remove( self, firewall_id: str, *, - tags: Optional[List[str]], + tags: Optional[SequenceNotStr[str]], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/gradient/resources/gpu_droplets/gpu_droplets.py b/src/gradient/resources/gpu_droplets/gpu_droplets.py index 0ce55ba8..48a9e5fe 100644 --- a/src/gradient/resources/gpu_droplets/gpu_droplets.py +++ b/src/gradient/resources/gpu_droplets/gpu_droplets.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, List, Union, Optional, cast +from typing import Any, Union, Optional, cast from typing_extensions import Literal, overload import httpx @@ -39,7 +39,7 @@ BackupsResourceWithStreamingResponse, AsyncBackupsResourceWithStreamingResponse, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from .autoscale import ( @@ -215,10 +215,10 @@ def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
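The Droplet creation overloads above get the same treatment: names, tags, volumes, and ssh_keys all take non-string sequences, with ssh_keys additionally mixing integer IDs and string fingerprints. A hedged sketch of the multi-create overload (the image and size slugs are illustrative, not taken from this diff):

    from gradient import Gradient

    client = Gradient()

    client.gpu_droplets.create(
        image="ubuntu-24-04-x64",        # illustrative image slug
        names=("worker-1", "worker-2"),  # SequenceNotStr[str]: one Droplet per name
        size="s-1vcpu-1gb",              # illustrative size slug
        ssh_keys=(289794,),              # ints (key IDs) and strs (fingerprints) both allowed
        tags=("env:staging",),
    )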
@@ -328,7 +328,7 @@ def create( self, *, image: Union[str, int], - names: List[str], + names: SequenceNotStr[str], size: str, backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, backups: bool | NotGiven = NOT_GIVEN, @@ -336,10 +336,10 @@ def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -458,13 +458,13 @@ def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: List[str] | NotGiven = NOT_GIVEN, + names: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -986,10 +986,10 @@ async def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -1099,7 +1099,7 @@ async def create( self, *, image: Union[str, int], - names: List[str], + names: SequenceNotStr[str], size: str, backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, backups: bool | NotGiven = NOT_GIVEN, @@ -1107,10 +1107,10 @@ async def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1229,13 +1229,13 @@ async def create( monitoring: bool | NotGiven = NOT_GIVEN, private_networking: bool | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, + volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: List[str] | NotGiven = NOT_GIVEN, + names: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/gradient/resources/gpu_droplets/images/images.py b/src/gradient/resources/gpu_droplets/images/images.py index 09994263..1b00c024 100644 --- a/src/gradient/resources/gpu_droplets/images/images.py +++ b/src/gradient/resources/gpu_droplets/images/images.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal import httpx @@ -15,7 +15,7 @@ ActionsResourceWithStreamingResponse, AsyncActionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -98,7 +98,7 @@ def create( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, url: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
@@ -464,7 +464,7 @@ async def create( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, url: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py index d876b50f..8f11a5da 100644 --- a/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Iterable +from typing import Iterable from typing_extensions import Literal, overload import httpx @@ -15,7 +15,7 @@ DropletsResourceWithStreamingResponse, AsyncDropletsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -122,7 +122,7 @@ def create( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -281,7 +281,7 @@ def create( size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -441,7 +441,7 @@ def create( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -571,7 +571,7 @@ def update( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -725,7 +725,7 @@ def update( size_unit: int | NotGiven = NOT_GIVEN, 
sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -880,7 +880,7 @@ def update( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -1134,7 +1134,7 @@ async def create( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -1293,7 +1293,7 @@ async def create( size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -1453,7 +1453,7 @@ async def create( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -1583,7 +1583,7 @@ async def update( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, @@ -1737,7 +1737,7 @@ async def update( size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = 
NOT_GIVEN, @@ -1892,7 +1892,7 @@ async def update( size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, size_unit: int | NotGiven = NOT_GIVEN, sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, diff --git a/src/gradient/resources/gpu_droplets/volumes/actions.py b/src/gradient/resources/gpu_droplets/volumes/actions.py index 2e093136..c648beaa 100644 --- a/src/gradient/resources/gpu_droplets/volumes/actions.py +++ b/src/gradient/resources/gpu_droplets/volumes/actions.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Literal, overload import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -184,7 +184,7 @@ def initiate_by_id( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -474,7 +474,7 @@ def initiate_by_id( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, size_gigabytes: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -541,7 +541,7 @@ def initiate_by_name( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -719,7 +719,7 @@ def initiate_by_name( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -909,7 +909,7 @@ async def initiate_by_id( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1199,7 +1199,7 @@ async def initiate_by_id( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, size_gigabytes: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1266,7 +1266,7 @@ async def initiate_by_name( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1444,7 +1444,7 @@ async def initiate_by_name( "syd1", ] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/gradient/resources/gpu_droplets/volumes/snapshots.py b/src/gradient/resources/gpu_droplets/volumes/snapshots.py index 0f9e30fa..7e925264 100644 --- a/src/gradient/resources/gpu_droplets/volumes/snapshots.py +++ b/src/gradient/resources/gpu_droplets/volumes/snapshots.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -50,7 +50,7 @@ def create( volume_id: str, *, name: str, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -251,7 +251,7 @@ async def create( volume_id: str, *, name: str, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, diff --git a/src/gradient/resources/gpu_droplets/volumes/volumes.py b/src/gradient/resources/gpu_droplets/volumes/volumes.py index ada4aedf..ee980a10 100644 --- a/src/gradient/resources/gpu_droplets/volumes/volumes.py +++ b/src/gradient/resources/gpu_droplets/volumes/volumes.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Literal, overload import httpx @@ -15,7 +15,7 @@ ActionsResourceWithStreamingResponse, AsyncActionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ...._utils import required_args, maybe_transform, async_maybe_transform from .snapshots import ( SnapshotsResource, @@ -97,7 +97,7 @@ def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -182,7 +182,7 @@ def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -267,7 +267,7 @@ def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -590,7 +590,7 @@ async def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -675,7 +675,7 @@ async def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -760,7 +760,7 @@ async def create( filesystem_label: str | NotGiven = NOT_GIVEN, filesystem_type: str | NotGiven = NOT_GIVEN, snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py index 723b42f5..41e7da76 100644 --- a/src/gradient/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py @@ -2,11 +2,9 @@ from __future__ import annotations -from typing import List - import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -54,7 +52,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: def create( self, *, - data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + data_source_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, knowledge_base_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -285,7 +283,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo async def create( self, *, - data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + data_source_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, knowledge_base_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py index 594b2ba7..61fc85a8 100644 --- a/src/gradient/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing import List, Iterable +from typing import Iterable import httpx from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -80,7 +80,7 @@ def create( name: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. @@ -189,7 +189,7 @@ def update( embedding_model_uuid: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, body_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -368,7 +368,7 @@ async def create( name: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, vpc_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -477,7 +477,7 @@ async def update( embedding_model_uuid: str | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, + tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, body_uuid: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/src/gradient/types/agent_create_params.py b/src/gradient/types/agent_create_params.py index 68ebd227..db84a258 100644 --- a/src/gradient/types/agent_create_params.py +++ b/src/gradient/types/agent_create_params.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import List from typing_extensions import Annotated, TypedDict +from .._types import SequenceNotStr from .._utils import PropertyInfo __all__ = ["AgentCreateParams"] @@ -25,7 +25,7 @@ class AgentCreateParams(TypedDict, total=False): for best practices. 
""" - knowledge_base_uuid: List[str] + knowledge_base_uuid: SequenceNotStr[str] """Ids of the knowledge base(s) to attach to the agent""" model_uuid: str @@ -43,5 +43,5 @@ class AgentCreateParams(TypedDict, total=False): region: str """The DigitalOcean region to deploy your agent in""" - tags: List[str] + tags: SequenceNotStr[str] """Agent tag to organize related resources""" diff --git a/src/gradient/types/agent_update_params.py b/src/gradient/types/agent_update_params.py index c26bf833..75c30cba 100644 --- a/src/gradient/types/agent_update_params.py +++ b/src/gradient/types/agent_update_params.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import List from typing_extensions import Annotated, TypedDict +from .._types import SequenceNotStr from .._utils import PropertyInfo from .api_retrieval_method import APIRetrievalMethod @@ -64,7 +64,7 @@ class AgentUpdateParams(TypedDict, total=False): - RETRIEVAL_METHOD_NONE: The retrieval method is none """ - tags: List[str] + tags: SequenceNotStr[str] """A set of abitrary tags to organize your agent""" temperature: float diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py index aaec2ba5..d8cf7bc1 100644 --- a/src/gradient/types/agents/chat/completion_create_params.py +++ b/src/gradient/types/agents/chat/completion_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ...._types import SequenceNotStr + __all__ = [ "CompletionCreateParamsBase", "Message", @@ -96,7 +98,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - stop: Union[Optional[str], List[str], None] + stop: Union[Optional[str], SequenceNotStr[str], None] """Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
@@ -156,7 +158,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the system message.""" role: Required[Literal["system"]] @@ -164,7 +166,7 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the developer message.""" role: Required[Literal["developer"]] @@ -172,7 +174,7 @@ class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the user message.""" role: Required[Literal["user"]] @@ -207,7 +209,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, List[str], None] + content: Union[str, SequenceNotStr[str], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py index 7a418e81..443a6f43 100644 --- a/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py +++ b/src/gradient/types/agents/evaluation_metrics/workspace_create_params.py @@ -2,14 +2,15 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ...._types import SequenceNotStr + __all__ = ["WorkspaceCreateParams"] class WorkspaceCreateParams(TypedDict, total=False): - agent_uuids: List[str] + agent_uuids: SequenceNotStr[str] """Ids of the agents(s) to attach to the workspace""" description: str diff --git a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py index 74e27dd2..7b451084 100644 --- a/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py +++ b/src/gradient/types/agents/evaluation_metrics/workspaces/agent_move_params.py @@ -2,16 +2,16 @@ from __future__ import annotations -from typing import List from typing_extensions import Annotated, TypedDict +from ....._types import SequenceNotStr from ....._utils import PropertyInfo __all__ = ["AgentMoveParams"] class AgentMoveParams(TypedDict, total=False): - agent_uuids: List[str] + agent_uuids: SequenceNotStr[str] """Agent uuids""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] diff --git a/src/gradient/types/agents/evaluation_run_create_params.py b/src/gradient/types/agents/evaluation_run_create_params.py index 3029e192..52bbee85 100644 --- a/src/gradient/types/agents/evaluation_run_create_params.py +++ b/src/gradient/types/agents/evaluation_run_create_params.py @@ -2,14 +2,15 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ..._types import SequenceNotStr + __all__ = ["EvaluationRunCreateParams"] class EvaluationRunCreateParams(TypedDict, total=False): - agent_uuids: List[str] + agent_uuids: SequenceNotStr[str] """Agent 
UUIDs to run the test case against.""" run_name: str diff --git a/src/gradient/types/agents/evaluation_test_case_create_params.py b/src/gradient/types/agents/evaluation_test_case_create_params.py index 51ce20c7..af49d024 100644 --- a/src/gradient/types/agents/evaluation_test_case_create_params.py +++ b/src/gradient/types/agents/evaluation_test_case_create_params.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ..._types import SequenceNotStr from .api_star_metric_param import APIStarMetricParam __all__ = ["EvaluationTestCaseCreateParams"] @@ -17,7 +17,7 @@ class EvaluationTestCaseCreateParams(TypedDict, total=False): description: str """Description of the test case.""" - metrics: List[str] + metrics: SequenceNotStr[str] """Full metric list to use for evaluation test case.""" name: str diff --git a/src/gradient/types/agents/evaluation_test_case_update_params.py b/src/gradient/types/agents/evaluation_test_case_update_params.py index 825f961b..d707d909 100644 --- a/src/gradient/types/agents/evaluation_test_case_update_params.py +++ b/src/gradient/types/agents/evaluation_test_case_update_params.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import List from typing_extensions import Annotated, TypedDict +from ..._types import SequenceNotStr from ..._utils import PropertyInfo from .api_star_metric_param import APIStarMetricParam @@ -30,4 +30,4 @@ class EvaluationTestCaseUpdateParams(TypedDict, total=False): class Metrics(TypedDict, total=False): - metric_uuids: List[str] + metric_uuids: SequenceNotStr[str] diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py index aaec2ba5..17f00242 100644 --- a/src/gradient/types/chat/completion_create_params.py +++ b/src/gradient/types/chat/completion_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr + __all__ = [ "CompletionCreateParamsBase", "Message", @@ -96,7 +98,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - stop: Union[Optional[str], List[str], None] + stop: Union[Optional[str], SequenceNotStr[str], None] """Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
@@ -156,7 +158,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the system message.""" role: Required[Literal["system"]] @@ -164,7 +166,7 @@ class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the developer message.""" role: Required[Literal["developer"]] @@ -172,7 +174,7 @@ class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, List[str]]] + content: Required[Union[str, SequenceNotStr[str]]] """The contents of the user message.""" role: Required[Literal["user"]] @@ -207,7 +209,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, List[str], None] + content: Union[str, SequenceNotStr[str], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/gpu_droplet_create_params.py b/src/gradient/types/gpu_droplet_create_params.py index f38661fb..96403479 100644 --- a/src/gradient/types/gpu_droplet_create_params.py +++ b/src/gradient/types/gpu_droplet_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Required, TypeAlias, TypedDict +from .._types import SequenceNotStr from .droplet_backup_policy_param import DropletBackupPolicyParam __all__ = ["GPUDropletCreateParams", "DropletSingleCreate", "DropletMultiCreate"] @@ -65,14 +66,14 @@ class DropletSingleCreate(TypedDict, total=False): the Droplet may deploy in any region. """ - ssh_keys: List[Union[str, int]] + ssh_keys: SequenceNotStr[Union[str, int]] """ An array containing the IDs or fingerprints of the SSH keys that you wish to embed in the Droplet's root account upon creation. You must add the keys to your team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to apply to the Droplet after it is created. @@ -86,7 +87,7 @@ class DropletSingleCreate(TypedDict, total=False): and may not exceed 64 KiB in size. """ - volumes: List[str] + volumes: SequenceNotStr[str] """ An array of IDs for block storage volumes that will be attached to the Droplet once created. The volumes must not already be attached to an existing Droplet. @@ -118,7 +119,7 @@ class DropletMultiCreate(TypedDict, total=False): scope. """ - names: Required[List[str]] + names: Required[SequenceNotStr[str]] """ An array of human-readable strings you wish to use when displaying the Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS @@ -165,14 +166,14 @@ class DropletMultiCreate(TypedDict, total=False): the Droplet may deploy in any region.
""" - ssh_keys: List[Union[str, int]] + ssh_keys: SequenceNotStr[Union[str, int]] """ An array containing the IDs or fingerprints of the SSH keys that you wish to embed in the Droplet's root account upon creation. You must add the keys to your team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to apply to the Droplet after it is created. @@ -186,7 +187,7 @@ class DropletMultiCreate(TypedDict, total=False): and may not exceed 64 KiB in size. """ - volumes: List[str] + volumes: SequenceNotStr[str] """ An array of IDs for block storage volumes that will be attached to the Droplet once created. The volumes must not already be attached to an existing Droplet. diff --git a/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py index c491ed55..3eb8ac89 100644 --- a/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py +++ b/src/gradient/types/gpu_droplets/autoscale_pool_droplet_template_param.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List from typing_extensions import Literal, Required, TypedDict +from ..._types import SequenceNotStr + __all__ = ["AutoscalePoolDropletTemplateParam"] @@ -38,7 +39,7 @@ class AutoscalePoolDropletTemplateParam(TypedDict, total=False): size: Required[str] """The Droplet size to be used for all Droplets in the autoscale pool.""" - ssh_keys: Required[List[str]] + ssh_keys: Required[SequenceNotStr[str]] """The SSH keys to be installed on the Droplets in the autoscale pool. You can either specify the key ID or the fingerprint. Requires `ssh_key:read` @@ -57,7 +58,7 @@ class AutoscalePoolDropletTemplateParam(TypedDict, total=False): `project:read` scope. """ - tags: List[str] + tags: SequenceNotStr[str] """ The tags to apply to each of the Droplets in the autoscale pool. Requires `tag:read` scope. diff --git a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py index f4037b6b..9a9730e7 100644 --- a/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py +++ b/src/gradient/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py @@ -2,33 +2,34 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ..._types import SequenceNotStr + __all__ = ["DestroyWithAssociatedResourceDeleteSelectiveParams"] class DestroyWithAssociatedResourceDeleteSelectiveParams(TypedDict, total=False): - floating_ips: List[str] + floating_ips: SequenceNotStr[str] """ An array of unique identifiers for the floating IPs to be scheduled for deletion. """ - reserved_ips: List[str] + reserved_ips: SequenceNotStr[str] """ An array of unique identifiers for the reserved IPs to be scheduled for deletion. """ - snapshots: List[str] + snapshots: SequenceNotStr[str] """An array of unique identifiers for the snapshots to be scheduled for deletion.""" - volume_snapshots: List[str] + volume_snapshots: SequenceNotStr[str] """ An array of unique identifiers for the volume snapshots to be scheduled for deletion. 
""" - volumes: List[str] + volumes: SequenceNotStr[str] """An array of unique identifiers for the volumes to be scheduled for deletion.""" diff --git a/src/gradient/types/gpu_droplets/firewall_param.py b/src/gradient/types/gpu_droplets/firewall_param.py index 1be9cf6a..8b5a5a15 100644 --- a/src/gradient/types/gpu_droplets/firewall_param.py +++ b/src/gradient/types/gpu_droplets/firewall_param.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Iterable, Optional +from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ..._types import SequenceNotStr from ..shared_params.firewall_rule_target import FirewallRuleTarget __all__ = ["FirewallParam", "InboundRule", "OutboundRule"] @@ -58,7 +59,7 @@ class FirewallParam(TypedDict, total=False): outbound_rules: Optional[Iterable[OutboundRule]] - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names must exist in order to be referenced in a request. diff --git a/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py index 63af7640..c3b9696e 100644 --- a/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py +++ b/src/gradient/types/gpu_droplets/firewalls/tag_add_params.py @@ -2,14 +2,16 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Required, TypedDict +from ...._types import SequenceNotStr + __all__ = ["TagAddParams"] class TagAddParams(TypedDict, total=False): - tags: Required[Optional[List[str]]] + tags: Required[Optional[SequenceNotStr[str]]] """A flat array of tag names as strings to be applied to the resource. Tag names must exist in order to be referenced in a request. diff --git a/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py index 91a3e382..bdd848f3 100644 --- a/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py +++ b/src/gradient/types/gpu_droplets/firewalls/tag_remove_params.py @@ -2,14 +2,16 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Required, TypedDict +from ...._types import SequenceNotStr + __all__ = ["TagRemoveParams"] class TagRemoveParams(TypedDict, total=False): - tags: Required[Optional[List[str]]] + tags: Required[Optional[SequenceNotStr[str]]] """A flat array of tag names as strings to be applied to the resource. Tag names must exist in order to be referenced in a request. diff --git a/src/gradient/types/gpu_droplets/image_create_params.py b/src/gradient/types/gpu_droplets/image_create_params.py index efbd684c..baae3bf5 100644 --- a/src/gradient/types/gpu_droplets/image_create_params.py +++ b/src/gradient/types/gpu_droplets/image_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Literal, TypedDict +from ..._types import SequenceNotStr + __all__ = ["ImageCreateParams"] @@ -64,7 +66,7 @@ class ImageCreateParams(TypedDict, total=False): available. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. 
diff --git a/src/gradient/types/gpu_droplets/lb_firewall_param.py b/src/gradient/types/gpu_droplets/lb_firewall_param.py index 6f1dcf10..7d54a048 100644 --- a/src/gradient/types/gpu_droplets/lb_firewall_param.py +++ b/src/gradient/types/gpu_droplets/lb_firewall_param.py @@ -2,20 +2,21 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ..._types import SequenceNotStr + __all__ = ["LbFirewallParam"] class LbFirewallParam(TypedDict, total=False): - allow: List[str] + allow: SequenceNotStr[str] """ the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or 'cidr:1.2.0.0/16') """ - deny: List[str] + deny: SequenceNotStr[str] """ the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or 'cidr:1.2.0.0/16') diff --git a/src/gradient/types/gpu_droplets/load_balancer_create_params.py b/src/gradient/types/gpu_droplets/load_balancer_create_params.py index a87d9148..06472c78 100644 --- a/src/gradient/types/gpu_droplets/load_balancer_create_params.py +++ b/src/gradient/types/gpu_droplets/load_balancer_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from .domains_param import DomainsParam from .lb_firewall_param import LbFirewallParam from .glb_settings_param import GlbSettingsParam @@ -148,7 +149,7 @@ class AssignDropletsByID(TypedDict, total=False): sticky_sessions: StickySessionsParam """An object specifying sticky sessions settings for the load balancer.""" - target_load_balancer_ids: List[str] + target_load_balancer_ids: SequenceNotStr[str] """ An array containing the UUIDs of the Regional load balancers to be used as target backends for a Global load balancer. @@ -308,7 +309,7 @@ class AssignDropletsByTag(TypedDict, total=False): balancer. """ - target_load_balancer_ids: List[str] + target_load_balancer_ids: SequenceNotStr[str] """ An array containing the UUIDs of the Regional load balancers to be used as target backends for a Global load balancer. diff --git a/src/gradient/types/gpu_droplets/load_balancer_update_params.py b/src/gradient/types/gpu_droplets/load_balancer_update_params.py index 9a1906cb..01c2bda5 100644 --- a/src/gradient/types/gpu_droplets/load_balancer_update_params.py +++ b/src/gradient/types/gpu_droplets/load_balancer_update_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from .domains_param import DomainsParam from .lb_firewall_param import LbFirewallParam from .glb_settings_param import GlbSettingsParam @@ -148,7 +149,7 @@ class AssignDropletsByID(TypedDict, total=False): sticky_sessions: StickySessionsParam """An object specifying sticky sessions settings for the load balancer.""" - target_load_balancer_ids: List[str] + target_load_balancer_ids: SequenceNotStr[str] """ An array containing the UUIDs of the Regional load balancers to be used as target backends for a Global load balancer. @@ -308,7 +309,7 @@ class AssignDropletsByTag(TypedDict, total=False): balancer. """ - target_load_balancer_ids: List[str] + target_load_balancer_ids: SequenceNotStr[str] """ An array containing the UUIDs of the Regional load balancers to be used as target backends for a Global load balancer. 
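Every hunk in this patch makes the same mechanical swap: `List[str]` (and `List[Union[str, int]]`) request parameters become `SequenceNotStr[...]`, imported from the package's `_types` module. The motivation is a well-known typing pitfall: widening `List[str]` to `Sequence[str]` would let callers pass tuples and other sequences, but it would also accept a bare `str`, because a string is itself a sequence of strings. `SequenceNotStr` keeps the widening while structurally excluding `str`. Below is a minimal sketch of how such a protocol is commonly written in Stainless-generated SDKs; the actual definition in this SDK's `_types.py` may differ in detail:

    from typing import Any, Iterator, Protocol, Sequence, SupportsIndex, TypeVar, overload

    _T_co = TypeVar("_T_co", covariant=True)

    class SequenceNotStr(Protocol[_T_co]):
        # list and tuple match this protocol structurally; str does not,
        # because str.__contains__ only accepts str (narrower than object),
        # so type checkers reject a bare string here.
        @overload
        def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
        @overload
        def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
        def __contains__(self, value: object, /) -> bool: ...
        def __len__(self) -> int: ...
        def __iter__(self) -> Iterator[_T_co]: ...
        def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
        def count(self, value: Any, /) -> int: ...
        def __reversed__(self) -> Iterator[_T_co]: ...

Because the check is purely structural, nothing changes at runtime; callers simply gain the ability to pass tuples and other sequences, while a bare `str` remains rejected, which a plain `Sequence[str]` annotation could not guarantee.
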
diff --git a/src/gradient/types/gpu_droplets/volume_create_params.py b/src/gradient/types/gpu_droplets/volume_create_params.py index fc889801..c58f7f9d 100644 --- a/src/gradient/types/gpu_droplets/volume_create_params.py +++ b/src/gradient/types/gpu_droplets/volume_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr + __all__ = ["VolumeCreateParams", "VolumesExt4", "VolumesXfs"] @@ -70,7 +72,7 @@ class VolumesExt4(TypedDict, total=False): snapshot_id: str """The unique identifier for the volume snapshot from which to create the volume.""" - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. @@ -141,7 +143,7 @@ class VolumesXfs(TypedDict, total=False): snapshot_id: str """The unique identifier for the volume snapshot from which to create the volume.""" - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py index 6d41d463..bf1869af 100644 --- a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py +++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_id_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ...._types import SequenceNotStr + __all__ = ["ActionInitiateByIDParams", "VolumeActionPostAttach", "VolumeActionPostDetach", "VolumeActionPostResize"] @@ -46,7 +48,7 @@ class VolumeActionPostAttach(TypedDict, total=False): available. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. diff --git a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py index d1a7d084..f37d6d9a 100644 --- a/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py +++ b/src/gradient/types/gpu_droplets/volumes/action_initiate_by_name_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ...._types import SequenceNotStr + __all__ = ["ActionInitiateByNameParams", "VolumeActionPostAttach", "VolumeActionPostDetach"] @@ -46,7 +48,7 @@ class VolumeActionPostAttach(TypedDict, total=False): available. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. 
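For SDK users, the effect is visible at call sites such as the volume-snapshot create touched below. A hypothetical usage sketch (the `Gradient` client name follows the usual Stainless layout and the UUID is invented for illustration):

    from gradient import Gradient  # assumed top-level client export

    client = Gradient()

    # A tuple now type-checks where previously only a list[str] did.
    # A bare string such as tags="prod" is still flagged by the type
    # checker instead of slipping through as a sequence of one-character
    # strings, which is the failure mode plain Sequence[str] would allow.
    client.gpu_droplets.volumes.snapshots.create(
        volume_id="7724db7c-e098-11e5-b571-000f53304e51",  # illustrative UUID
        name="weekly-snapshot",
        tags=("prod", "backup"),
    )
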
diff --git a/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py index 8cce4a59..890dd302 100644 --- a/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py +++ b/src/gradient/types/gpu_droplets/volumes/snapshot_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Required, TypedDict +from ...._types import SequenceNotStr + __all__ = ["SnapshotCreateParams"] @@ -12,7 +14,7 @@ class SnapshotCreateParams(TypedDict, total=False): name: Required[str] """A human-readable name for the volume snapshot.""" - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names may be for either existing or new tags. diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py index 5c0df9a6..e40bd598 100644 --- a/src/gradient/types/knowledge_base_create_params.py +++ b/src/gradient/types/knowledge_base_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Iterable +from typing import Iterable from typing_extensions import TypedDict +from .._types import SequenceNotStr from .knowledge_bases.aws_data_source_param import AwsDataSourceParam from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam @@ -44,7 +45,7 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): region: str """The datacenter region to deploy the knowledge base in.""" - tags: List[str] + tags: SequenceNotStr[str] """Tags to organize your knowledge base.""" vpc_uuid: str diff --git a/src/gradient/types/knowledge_base_update_params.py b/src/gradient/types/knowledge_base_update_params.py index 7a86b40c..cfb52016 100644 --- a/src/gradient/types/knowledge_base_update_params.py +++ b/src/gradient/types/knowledge_base_update_params.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import List from typing_extensions import Annotated, TypedDict +from .._types import SequenceNotStr from .._utils import PropertyInfo __all__ = ["KnowledgeBaseUpdateParams"] @@ -23,7 +23,7 @@ class KnowledgeBaseUpdateParams(TypedDict, total=False): project_id: str """The id of the DigitalOcean project this knowledge base will belong to""" - tags: List[str] + tags: SequenceNotStr[str] """Tags to organize your knowledge base.""" body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/gradient/types/knowledge_bases/indexing_job_create_params.py b/src/gradient/types/knowledge_bases/indexing_job_create_params.py index d92c5790..ebd8632b 100644 --- a/src/gradient/types/knowledge_bases/indexing_job_create_params.py +++ b/src/gradient/types/knowledge_bases/indexing_job_create_params.py @@ -2,14 +2,15 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict +from ..._types import SequenceNotStr + __all__ = ["IndexingJobCreateParams"] class IndexingJobCreateParams(TypedDict, total=False): - data_source_uuids: List[str] + data_source_uuids: SequenceNotStr[str] """ List of data source ids to index, if none are provided, all data sources will be indexed diff --git a/src/gradient/types/shared_params/firewall_rule_target.py b/src/gradient/types/shared_params/firewall_rule_target.py index 49a5f75c..7f317f6c 
100644 --- a/src/gradient/types/shared_params/firewall_rule_target.py +++ b/src/gradient/types/shared_params/firewall_rule_target.py @@ -2,14 +2,16 @@ from __future__ import annotations -from typing import List, Iterable, Optional +from typing import Iterable, Optional from typing_extensions import TypedDict +from ..._types import SequenceNotStr + __all__ = ["FirewallRuleTarget"] class FirewallRuleTarget(TypedDict, total=False): - addresses: List[str] + addresses: SequenceNotStr[str] """ An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, and/or IPv6 CIDRs to which the firewall will allow traffic. @@ -21,19 +23,19 @@ class FirewallRuleTarget(TypedDict, total=False): traffic. """ - kubernetes_ids: List[str] + kubernetes_ids: SequenceNotStr[str] """ An array containing the IDs of the Kubernetes clusters to which the firewall will allow traffic. """ - load_balancer_uids: List[str] + load_balancer_uids: SequenceNotStr[str] """ An array containing the IDs of the load balancers to which the firewall will allow traffic. """ - tags: Optional[List[str]] + tags: Optional[SequenceNotStr[str]] """A flat array of tag names as strings to be applied to the resource. Tag names must exist in order to be referenced in a request. From 2f0ca954b236a11fa224e05cb363d53f768c78da Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 03:55:25 +0000 Subject: [PATCH 162/200] feat: improve future compat with pydantic v3 --- src/gradient/_base_client.py | 6 +- src/gradient/_compat.py | 96 ++++++++--------- src/gradient/_models.py | 80 +++++++------- src/gradient/_utils/__init__.py | 10 +- src/gradient/_utils/_compat.py | 45 ++++++++ src/gradient/_utils/_datetime_parse.py | 136 ++++++++++++++++++++++++ src/gradient/_utils/_transform.py | 6 +- src/gradient/_utils/_typing.py | 2 +- src/gradient/_utils/_utils.py | 1 - src/gradient/types/__init__.py | 50 ++++----- tests/test_models.py | 48 ++++----- tests/test_transform.py | 16 +-- tests/test_utils/test_datetime_parse.py | 110 +++++++++++++++++++ tests/utils.py | 8 +- 14 files changed, 457 insertions(+), 157 deletions(-) create mode 100644 src/gradient/_utils/_compat.py create mode 100644 src/gradient/_utils/_datetime_parse.py create mode 100644 tests/test_utils/test_datetime_parse.py diff --git a/src/gradient/_base_client.py b/src/gradient/_base_client.py index 74f3c57a..58b14617 100644 --- a/src/gradient/_base_client.py +++ b/src/gradient/_base_client.py @@ -59,7 +59,7 @@ ModelBuilderProtocol, ) from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping -from ._compat import PYDANTIC_V2, model_copy, model_dump +from ._compat import PYDANTIC_V1, model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( APIResponse, @@ -232,7 +232,7 @@ def _set_private_attributes( model: Type[_T], options: FinalRequestOptions, ) -> None: - if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: self.__pydantic_private__ = {} self._model = model @@ -320,7 +320,7 @@ def _set_private_attributes( client: AsyncAPIClient, options: FinalRequestOptions, ) -> None: - if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: self.__pydantic_private__ = {} self._model = model diff --git a/src/gradient/_compat.py 
b/src/gradient/_compat.py index 92d9ee61..bdef67f0 100644 --- a/src/gradient/_compat.py +++ b/src/gradient/_compat.py @@ -12,14 +12,13 @@ _T = TypeVar("_T") _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) -# --------------- Pydantic v2 compatibility --------------- +# --------------- Pydantic v2, v3 compatibility --------------- # Pyright incorrectly reports some of our functions as overriding a method when they don't # pyright: reportIncompatibleMethodOverride=false -PYDANTIC_V2 = pydantic.VERSION.startswith("2.") +PYDANTIC_V1 = pydantic.VERSION.startswith("1.") -# v1 re-exports if TYPE_CHECKING: def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001 @@ -44,90 +43,92 @@ def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001 ... else: - if PYDANTIC_V2: - from pydantic.v1.typing import ( + # v1 re-exports + if PYDANTIC_V1: + from pydantic.typing import ( get_args as get_args, is_union as is_union, get_origin as get_origin, is_typeddict as is_typeddict, is_literal_type as is_literal_type, ) - from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime else: - from pydantic.typing import ( + from ._utils import ( get_args as get_args, is_union as is_union, get_origin as get_origin, + parse_date as parse_date, is_typeddict as is_typeddict, + parse_datetime as parse_datetime, is_literal_type as is_literal_type, ) - from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # refactored config if TYPE_CHECKING: from pydantic import ConfigDict as ConfigDict else: - if PYDANTIC_V2: - from pydantic import ConfigDict - else: + if PYDANTIC_V1: # TODO: provide an error message here? 
ConfigDict = None + else: + from pydantic import ConfigDict as ConfigDict # renamed methods / properties def parse_obj(model: type[_ModelT], value: object) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(value) - else: + if PYDANTIC_V1: return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + else: + return model.model_validate(value) def field_is_required(field: FieldInfo) -> bool: - if PYDANTIC_V2: - return field.is_required() - return field.required # type: ignore + if PYDANTIC_V1: + return field.required # type: ignore + return field.is_required() def field_get_default(field: FieldInfo) -> Any: value = field.get_default() - if PYDANTIC_V2: - from pydantic_core import PydanticUndefined - - if value == PydanticUndefined: - return None + if PYDANTIC_V1: return value + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None return value def field_outer_type(field: FieldInfo) -> Any: - if PYDANTIC_V2: - return field.annotation - return field.outer_type_ # type: ignore + if PYDANTIC_V1: + return field.outer_type_ # type: ignore + return field.annotation def get_model_config(model: type[pydantic.BaseModel]) -> Any: - if PYDANTIC_V2: - return model.model_config - return model.__config__ # type: ignore + if PYDANTIC_V1: + return model.__config__ # type: ignore + return model.model_config def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: - if PYDANTIC_V2: - return model.model_fields - return model.__fields__ # type: ignore + if PYDANTIC_V1: + return model.__fields__ # type: ignore + return model.model_fields def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: - if PYDANTIC_V2: - return model.model_copy(deep=deep) - return model.copy(deep=deep) # type: ignore + if PYDANTIC_V1: + return model.copy(deep=deep) # type: ignore + return model.model_copy(deep=deep) def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: - if PYDANTIC_V2: - return model.model_dump_json(indent=indent) - return model.json(indent=indent) # type: ignore + if PYDANTIC_V1: + return model.json(indent=indent) # type: ignore + return model.model_dump_json(indent=indent) def model_dump( @@ -139,14 +140,14 @@ def model_dump( warnings: bool = True, mode: Literal["json", "python"] = "python", ) -> dict[str, Any]: - if PYDANTIC_V2 or hasattr(model, "model_dump"): + if (not PYDANTIC_V1) or hasattr(model, "model_dump"): return model.model_dump( mode=mode, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, # warnings are not supported in Pydantic v1 - warnings=warnings if PYDANTIC_V2 else True, + warnings=True if PYDANTIC_V1 else warnings, ) return cast( "dict[str, Any]", @@ -159,9 +160,9 @@ def model_dump( def model_parse(model: type[_ModelT], data: Any) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(data) - return model.parse_obj(data) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + return model.model_validate(data) # generic models @@ -170,17 +171,16 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT: class GenericModel(pydantic.BaseModel): ... else: - if PYDANTIC_V2: + if PYDANTIC_V1: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... 
+ else: # there no longer needs to be a distinction in v2 but # we still have to create our own subclass to avoid # inconsistent MRO ordering errors class GenericModel(pydantic.BaseModel): ... - else: - import pydantic.generics - - class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... - # cached properties if TYPE_CHECKING: diff --git a/src/gradient/_models.py b/src/gradient/_models.py index 92f7c10b..3a6017ef 100644 --- a/src/gradient/_models.py +++ b/src/gradient/_models.py @@ -50,7 +50,7 @@ strip_annotated_type, ) from ._compat import ( - PYDANTIC_V2, + PYDANTIC_V1, ConfigDict, GenericModel as BaseGenericModel, get_args, @@ -81,11 +81,7 @@ class _ConfigProtocol(Protocol): class BaseModel(pydantic.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) - ) - else: + if PYDANTIC_V1: @property @override @@ -95,6 +91,10 @@ def model_fields_set(self) -> set[str]: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] extra: Any = pydantic.Extra.allow # type: ignore + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) def to_dict( self, @@ -215,25 +215,25 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] if key not in model_fields: parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value - if PYDANTIC_V2: - _extra[key] = parsed - else: + if PYDANTIC_V1: _fields_set.add(key) fields_values[key] = parsed + else: + _extra[key] = parsed object.__setattr__(m, "__dict__", fields_values) - if PYDANTIC_V2: - # these properties are copied from Pydantic's `model_construct()` method - object.__setattr__(m, "__pydantic_private__", None) - object.__setattr__(m, "__pydantic_extra__", _extra) - object.__setattr__(m, "__pydantic_fields_set__", _fields_set) - else: + if PYDANTIC_V1: # init_private_attributes() does not exist in v2 m._init_private_attributes() # type: ignore # copied from Pydantic v1's `construct()` method object.__setattr__(m, "__fields_set__", _fields_set) + else: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) + object.__setattr__(m, "__pydantic_extra__", _extra) + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) return m @@ -243,7 +243,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] # although not in practice model_construct = construct - if not PYDANTIC_V2: + if PYDANTIC_V1: # we define aliases for some of the new pydantic v2 methods so # that we can just document these methods without having to specify # a specific pydantic version as some users may not know which @@ -363,10 +363,10 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if value is None: return field_get_default(field) - if PYDANTIC_V2: - type_ = field.annotation - else: + if PYDANTIC_V1: type_ = cast(type, field.outer_type_) # type: ignore + else: + type_ = field.annotation # type: ignore if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") @@ -375,7 +375,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: - if not PYDANTIC_V2: + if PYDANTIC_V1: # TODO return None @@ -628,30 +628,30 @@ def _build_discriminated_union_meta(*, union: type, 
meta_annotations: tuple[Any, for variant in get_args(union): variant = strip_annotated_type(variant) if is_basemodel_type(variant): - if PYDANTIC_V2: - field = _extract_field_schema_pv2(variant, discriminator_field_name) - if not field: + if PYDANTIC_V1: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field.get("serialization_alias") - - field_schema = field["schema"] + discriminator_alias = field_info.alias - if field_schema["type"] == "literal": - for entry in cast("LiteralSchema", field_schema)["expected"]: + if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): + for entry in get_args(annotation): if isinstance(entry, str): mapping[entry] = variant else: - field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - if not field_info: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field_info.alias + discriminator_alias = field.get("serialization_alias") - if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): - for entry in get_args(annotation): + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: if isinstance(entry, str): mapping[entry] = variant @@ -714,7 +714,7 @@ class GenericModel(BaseGenericModel, BaseModel): pass -if PYDANTIC_V2: +if not PYDANTIC_V1: from pydantic import TypeAdapter as _TypeAdapter _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter)) @@ -782,12 +782,12 @@ class FinalRequestOptions(pydantic.BaseModel): json_data: Union[Body, None] = None extra_json: Union[AnyMapping, None] = None - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) def get_max_retries(self, max_retries: int) -> int: if isinstance(self.max_retries, NotGiven): @@ -820,9 +820,9 @@ def construct( # type: ignore key: strip_not_given(value) for key, value in values.items() } - if PYDANTIC_V2: - return super().model_construct(_fields_set, **kwargs) - return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + return super().model_construct(_fields_set, **kwargs) if not TYPE_CHECKING: # type checkers incorrectly complain about this assignment diff --git a/src/gradient/_utils/__init__.py b/src/gradient/_utils/__init__.py index ca547ce5..dc64e29a 100644 --- a/src/gradient/_utils/__init__.py +++ b/src/gradient/_utils/__init__.py @@ -10,7 +10,6 @@ lru_cache as lru_cache, is_mapping as is_mapping, is_tuple_t as is_tuple_t, - parse_date as parse_date, is_iterable as is_iterable, is_sequence as is_sequence, coerce_float as coerce_float, @@ -23,7 +22,6 @@ coerce_boolean as coerce_boolean, coerce_integer as coerce_integer, file_from_path as 
file_from_path, - parse_datetime as parse_datetime, strip_not_given as strip_not_given, deepcopy_minimal as deepcopy_minimal, get_async_library as get_async_library, @@ -32,6 +30,13 @@ maybe_coerce_boolean as maybe_coerce_boolean, maybe_coerce_integer as maybe_coerce_integer, ) +from ._compat import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, +) from ._typing import ( is_list_type as is_list_type, is_union_type as is_union_type, @@ -56,3 +61,4 @@ function_has_argument as function_has_argument, assert_signatures_in_sync as assert_signatures_in_sync, ) +from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime diff --git a/src/gradient/_utils/_compat.py b/src/gradient/_utils/_compat.py new file mode 100644 index 00000000..dd703233 --- /dev/null +++ b/src/gradient/_utils/_compat.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import sys +import typing_extensions +from typing import Any, Type, Union, Literal, Optional +from datetime import date, datetime +from typing_extensions import get_args as _get_args, get_origin as _get_origin + +from .._types import StrBytesIntFloat +from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime + +_LITERAL_TYPES = {Literal, typing_extensions.Literal} + + +def get_args(tp: type[Any]) -> tuple[Any, ...]: + return _get_args(tp) + + +def get_origin(tp: type[Any]) -> type[Any] | None: + return _get_origin(tp) + + +def is_union(tp: Optional[Type[Any]]) -> bool: + if sys.version_info < (3, 10): + return tp is Union # type: ignore[comparison-overlap] + else: + import types + + return tp is Union or tp is types.UnionType + + +def is_typeddict(tp: Type[Any]) -> bool: + return typing_extensions.is_typeddict(tp) + + +def is_literal_type(tp: Type[Any]) -> bool: + return get_origin(tp) in _LITERAL_TYPES + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + return _parse_date(value) + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + return _parse_datetime(value) diff --git a/src/gradient/_utils/_datetime_parse.py b/src/gradient/_utils/_datetime_parse.py new file mode 100644 index 00000000..7cb9d9e6 --- /dev/null +++ b/src/gradient/_utils/_datetime_parse.py @@ -0,0 +1,136 @@ +""" +This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py +without the Pydantic v1 specific errors. +""" + +from __future__ import annotations + +import re +from typing import Dict, Union, Optional +from datetime import date, datetime, timezone, timedelta + +from .._types import StrBytesIntFloat + +date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})" +time_expr = ( + r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})" + r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
+ r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$" +) + +date_re = re.compile(f"{date_expr}$") +datetime_re = re.compile(f"{date_expr}[T ]{time_expr}") + + +EPOCH = datetime(1970, 1, 1) +# if greater than this, the number is in ms, if less than or equal it's in seconds +# (in seconds this is 11th October 2603, in ms it's 20th August 1970) +MS_WATERSHED = int(2e10) +# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9 +MAX_NUMBER = int(3e20) + + +def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]: + if isinstance(value, (int, float)): + return value + try: + return float(value) + except ValueError: + return None + except TypeError: + raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None + + +def _from_unix_seconds(seconds: Union[int, float]) -> datetime: + if seconds > MAX_NUMBER: + return datetime.max + elif seconds < -MAX_NUMBER: + return datetime.min + + while abs(seconds) > MS_WATERSHED: + seconds /= 1000 + dt = EPOCH + timedelta(seconds=seconds) + return dt.replace(tzinfo=timezone.utc) + + +def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]: + if value == "Z": + return timezone.utc + elif value is not None: + offset_mins = int(value[-2:]) if len(value) > 3 else 0 + offset = 60 * int(value[1:3]) + offset_mins + if value[0] == "-": + offset = -offset + return timezone(timedelta(minutes=offset)) + else: + return None + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + """ + Parse a datetime/int/float/string and return a datetime.datetime. + + This function supports time zone offsets. When the input contains one, + the output uses a timezone with a fixed offset from UTC. + + Raise ValueError if the input is well formatted but not a valid datetime. + Raise ValueError if the input isn't well formatted. + """ + if isinstance(value, datetime): + return value + + number = _get_numeric(value, "datetime") + if number is not None: + return _from_unix_seconds(number) + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + + match = datetime_re.match(value) + if match is None: + raise ValueError("invalid datetime format") + + kw = match.groupdict() + if kw["microsecond"]: + kw["microsecond"] = kw["microsecond"].ljust(6, "0") + + tzinfo = _parse_timezone(kw.pop("tzinfo")) + kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None} + kw_["tzinfo"] = tzinfo + + return datetime(**kw_) # type: ignore + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + """ + Parse a date/int/float/string and return a datetime.date. + + Raise ValueError if the input is well formatted but not a valid date. + Raise ValueError if the input isn't well formatted.
+ """ + if isinstance(value, date): + if isinstance(value, datetime): + return value.date() + else: + return value + + number = _get_numeric(value, "date") + if number is not None: + return _from_unix_seconds(number).date() + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + match = date_re.match(value) + if match is None: + raise ValueError("invalid date format") + + kw = {k: int(v) for k, v in match.groupdict().items()} + + try: + return date(**kw) + except ValueError: + raise ValueError("invalid date format") from None diff --git a/src/gradient/_utils/_transform.py b/src/gradient/_utils/_transform.py index f0bcefd4..c19124f0 100644 --- a/src/gradient/_utils/_transform.py +++ b/src/gradient/_utils/_transform.py @@ -19,6 +19,7 @@ is_sequence, ) from .._files import is_base64_file_input +from ._compat import get_origin, is_typeddict from ._typing import ( is_list_type, is_union_type, @@ -29,7 +30,6 @@ is_annotated_type, strip_annotated_type, ) -from .._compat import get_origin, model_dump, is_typeddict _T = TypeVar("_T") @@ -169,6 +169,8 @@ def _transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation @@ -333,6 +335,8 @@ async def _async_transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation diff --git a/src/gradient/_utils/_typing.py b/src/gradient/_utils/_typing.py index 845cd6b2..193109f3 100644 --- a/src/gradient/_utils/_typing.py +++ b/src/gradient/_utils/_typing.py @@ -15,7 +15,7 @@ from ._utils import lru_cache from .._types import InheritsGeneric -from .._compat import is_union as _is_union +from ._compat import is_union as _is_union def is_annotated_type(typ: type) -> bool: diff --git a/src/gradient/_utils/_utils.py b/src/gradient/_utils/_utils.py index ea3cf3f2..f0818595 100644 --- a/src/gradient/_utils/_utils.py +++ b/src/gradient/_utils/_utils.py @@ -22,7 +22,6 @@ import sniffio from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike -from .._compat import parse_date as parse_date, parse_datetime as parse_datetime _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index 21266ef5..a699a714 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -97,7 +97,31 @@ # This ensures that, when building the deferred (due to cyclical references) model schema, # Pydantic can resolve the necessary references. # See: https://github.com/pydantic/pydantic/issues/11250 for more context. 
-if _compat.PYDANTIC_V2: +if _compat.PYDANTIC_V1: + api_agent.APIAgent.update_forward_refs() # type: ignore + api_workspace.APIWorkspace.update_forward_refs() # type: ignore + agent_create_response.AgentCreateResponse.update_forward_refs() # type: ignore + agent_retrieve_response.AgentRetrieveResponse.update_forward_refs() # type: ignore + agent_update_response.AgentUpdateResponse.update_forward_refs() # type: ignore + agent_delete_response.AgentDeleteResponse.update_forward_refs() # type: ignore + agent_update_status_response.AgentUpdateStatusResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore + agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore + agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore + agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore + agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore + agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore + agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore + agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore + models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore + models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore +else: api_agent.APIAgent.model_rebuild(_parent_namespace_depth=0) api_workspace.APIWorkspace.model_rebuild(_parent_namespace_depth=0) agent_create_response.AgentCreateResponse.model_rebuild(_parent_namespace_depth=0) @@ -129,27 +153,3 @@ models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild( _parent_namespace_depth=0 ) -else: - api_agent.APIAgent.update_forward_refs() # type: ignore - api_workspace.APIWorkspace.update_forward_refs() # type: ignore - agent_create_response.AgentCreateResponse.update_forward_refs() # type: ignore - agent_retrieve_response.AgentRetrieveResponse.update_forward_refs() # type: ignore - agent_update_response.AgentUpdateResponse.update_forward_refs() # type: ignore - agent_delete_response.AgentDeleteResponse.update_forward_refs() # type: ignore - agent_update_status_response.AgentUpdateStatusResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.workspace_create_response.WorkspaceCreateResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.workspace_retrieve_response.WorkspaceRetrieveResponse.update_forward_refs() # type: ignore - 
agents.evaluation_metrics.workspace_update_response.WorkspaceUpdateResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore - agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore - agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore - agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore - agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore - agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore - agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore - agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore - models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore - models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore diff --git a/tests/test_models.py b/tests/test_models.py index 9a2ee908..de5ef465 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -8,7 +8,7 @@ from pydantic import Field from gradient._utils import PropertyInfo -from gradient._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradient._compat import PYDANTIC_V1, parse_obj, model_dump, model_json from gradient._models import BaseModel, construct_type @@ -294,12 +294,12 @@ class Model(BaseModel): assert cast(bool, m.foo) is True m = Model.construct(foo={"name": 3}) - if PYDANTIC_V2: - assert isinstance(m.foo, Submodel1) - assert m.foo.name == 3 # type: ignore - else: + if PYDANTIC_V1: assert isinstance(m.foo, Submodel2) assert m.foo.name == "3" + else: + assert isinstance(m.foo, Submodel1) + assert m.foo.name == 3 # type: ignore def test_list_of_unions() -> None: @@ -426,10 +426,10 @@ class Model(BaseModel): expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) - if PYDANTIC_V2: - expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' - else: + if PYDANTIC_V1: expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' + else: + expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' model = Model.construct(created_at="2019-12-27T18:11:19.117Z") assert model.created_at == expected @@ -531,7 +531,7 @@ class Model2(BaseModel): assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)} assert m4.to_dict(mode="json") == {"created_at": time_str} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_dict(warnings=False) @@ -556,7 +556,7 @@ class Model(BaseModel): assert m3.model_dump() == {"foo": None} assert m3.model_dump(exclude_none=True) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump(round_trip=True) @@ -580,10 +580,10 @@ class Model(BaseModel): assert json.loads(m.to_json()) == {"FOO": "hello"} assert 
json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"} - if PYDANTIC_V2: - assert m.to_json(indent=None) == '{"FOO":"hello"}' - else: + if PYDANTIC_V1: assert m.to_json(indent=None) == '{"FOO": "hello"}' + else: + assert m.to_json(indent=None) == '{"FOO":"hello"}' m2 = Model() assert json.loads(m2.to_json()) == {} @@ -595,7 +595,7 @@ class Model(BaseModel): assert json.loads(m3.to_json()) == {"FOO": None} assert json.loads(m3.to_json(exclude_none=True)) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.to_json(warnings=False) @@ -622,7 +622,7 @@ class Model(BaseModel): assert json.loads(m3.model_dump_json()) == {"foo": None} assert json.loads(m3.model_dump_json(exclude_none=True)) == {} - if not PYDANTIC_V2: + if PYDANTIC_V1: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump_json(round_trip=True) @@ -679,12 +679,12 @@ class B(BaseModel): ) assert isinstance(m, A) assert m.type == "a" - if PYDANTIC_V2: - assert m.data == 100 # type: ignore[comparison-overlap] - else: + if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_unknown_variant() -> None: @@ -768,12 +768,12 @@ class B(BaseModel): ) assert isinstance(m, A) assert m.foo_type == "a" - if PYDANTIC_V2: - assert m.data == 100 # type: ignore[comparison-overlap] - else: + if PYDANTIC_V1: # pydantic v1 automatically converts inputs to strings # if the expected type is a str assert m.data == "100" + else: + assert m.data == 100 # type: ignore[comparison-overlap] def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None: @@ -833,7 +833,7 @@ class B(BaseModel): assert UnionType.__discriminator__ is discriminator -@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_type_alias_type() -> None: Alias = TypeAliasType("Alias", str) # pyright: ignore @@ -849,7 +849,7 @@ class Model(BaseModel): assert m.union == "bar" -@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1") +@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") def test_field_named_cls() -> None: class Model(BaseModel): cls: str @@ -936,7 +936,7 @@ class Type2(BaseModel): assert isinstance(model.value, InnerType2) -@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now") def test_extra_properties() -> None: class Item(BaseModel): prop: int diff --git a/tests/test_transform.py b/tests/test_transform.py index 552462fa..db909f25 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -15,7 +15,7 @@ parse_datetime, async_transform as _async_transform, ) -from gradient._compat import PYDANTIC_V2 +from gradient._compat import PYDANTIC_V1 from gradient._models import BaseModel _T = TypeVar("_T") @@ -189,7 +189,7 @@ class DateModel(BaseModel): @pytest.mark.asyncio async def test_iso8601_format(use_async: bool) -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") - tz = "Z" if PYDANTIC_V2 else "+00:00" + tz = "+00:00" if PYDANTIC_V1 else "Z" assert await transform({"foo": dt}, DatetimeDict, use_async) == 
{"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap] @@ -297,11 +297,11 @@ async def test_pydantic_unknown_field(use_async: bool) -> None: @pytest.mark.asyncio async def test_pydantic_mismatched_types(use_async: bool) -> None: model = MyModel.construct(foo=True) - if PYDANTIC_V2: + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: with pytest.warns(UserWarning): params = await transform(model, Any, use_async) - else: - params = await transform(model, Any, use_async) assert cast(Any, params) == {"foo": True} @@ -309,11 +309,11 @@ async def test_pydantic_mismatched_types(use_async: bool) -> None: @pytest.mark.asyncio async def test_pydantic_mismatched_object_type(use_async: bool) -> None: model = MyModel.construct(foo=MyModel.construct(hello="world")) - if PYDANTIC_V2: + if PYDANTIC_V1: + params = await transform(model, Any, use_async) + else: with pytest.warns(UserWarning): params = await transform(model, Any, use_async) - else: - params = await transform(model, Any, use_async) assert cast(Any, params) == {"foo": {"hello": "world"}} diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py new file mode 100644 index 00000000..6cbb1b6f --- /dev/null +++ b/tests/test_utils/test_datetime_parse.py @@ -0,0 +1,110 @@ +""" +Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py +with modifications so it works without pydantic v1 imports. +""" + +from typing import Type, Union +from datetime import date, datetime, timezone, timedelta + +import pytest + +from gradient._utils import parse_date, parse_datetime + + +def create_tz(minutes: int) -> timezone: + return timezone(timedelta(minutes=minutes)) + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + ("1494012444.883309", date(2017, 5, 5)), + (b"1494012444.883309", date(2017, 5, 5)), + (1_494_012_444.883_309, date(2017, 5, 5)), + ("1494012444", date(2017, 5, 5)), + (1_494_012_444, date(2017, 5, 5)), + (0, date(1970, 1, 1)), + ("2012-04-23", date(2012, 4, 23)), + (b"2012-04-23", date(2012, 4, 23)), + ("2012-4-9", date(2012, 4, 9)), + (date(2012, 4, 9), date(2012, 4, 9)), + (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)), + # Invalid inputs + ("x20120423", ValueError), + ("2012-04-56", ValueError), + (19_999_999_999, date(2603, 10, 11)), # just before watershed + (20_000_000_001, date(1970, 8, 20)), # just after watershed + (1_549_316_052, date(2019, 2, 4)), # nowish in s + (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms + (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs + (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns + ("infinity", date(9999, 12, 31)), + ("inf", date(9999, 12, 31)), + (float("inf"), date(9999, 12, 31)), + ("infinity ", date(9999, 12, 31)), + (int("1" + "0" * 100), date(9999, 12, 31)), + (1e1000, date(9999, 12, 31)), + ("-infinity", date(1, 1, 1)), + ("-inf", date(1, 1, 1)), + ("nan", ValueError), + ], +) +def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_date(value) + else: + assert parse_date(value) == result + + +@pytest.mark.parametrize( + "value,result", + [ + # Valid inputs + # values in seconds + 
("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)), + ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + # values in ms + ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)), + ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)), + (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)), + ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)), + ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)), + ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)), + ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))), + ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))), + ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))), + ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))), + (datetime(2017, 5, 5), datetime(2017, 5, 5)), + (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)), + # Invalid inputs + ("x20120423091500", ValueError), + ("2012-04-56T09:15:90", ValueError), + ("2012-04-23T11:05:00-25:00", ValueError), + (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed + (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed + (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s + (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms + (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs + (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns + ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)), + (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)), + (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)), + ("-infinity", datetime(1, 1, 1, 0, 0)), + ("-inf", datetime(1, 1, 1, 0, 0)), + ("nan", ValueError), + ], +) +def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None: + if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance] + with pytest.raises(result): + parse_datetime(value) + else: + assert parse_datetime(value) == result diff --git a/tests/utils.py b/tests/utils.py index ac014538..8d9112d6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -19,7 +19,7 @@ is_annotated_type, is_type_alias_type, ) -from gradient._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradient._compat import PYDANTIC_V1, field_outer_type, get_model_fields from gradient._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) @@ -28,12 +28,12 @@ def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool: for name, field in get_model_fields(model).items(): field_value = getattr(value, name) - if 
PYDANTIC_V2: - allow_none = False - else: + if PYDANTIC_V1: # in v1 nullability was structured differently # https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields allow_none = getattr(field, "allow_none", False) + else: + allow_none = False assert_matches_type( field_outer_type(field), From 19ff724ba6fdfd2e80b9cf774e7f6308979a0ce7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 5 Sep 2025 04:30:15 +0000 Subject: [PATCH 163/200] chore(internal): move mypy configurations to `pyproject.toml` file --- mypy.ini | 50 ------------------------------------------------ pyproject.toml | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 50 deletions(-) delete mode 100644 mypy.ini diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index 9a8e555e..00000000 --- a/mypy.ini +++ /dev/null @@ -1,50 +0,0 @@ -[mypy] -pretty = True -show_error_codes = True - -# Exclude _files.py because mypy isn't smart enough to apply -# the correct type narrowing and as this is an internal module -# it's fine to just use Pyright. -# -# We also exclude our `tests` as mypy doesn't always infer -# types correctly and Pyright will still catch any type errors. -exclude = ^(src/gradient/_files\.py|_dev/.*\.py|tests/.*)$ - -strict_equality = True -implicit_reexport = True -check_untyped_defs = True -no_implicit_optional = True - -warn_return_any = True -warn_unreachable = True -warn_unused_configs = True - -# Turn these options off as it could cause conflicts -# with the Pyright options. -warn_unused_ignores = False -warn_redundant_casts = False - -disallow_any_generics = True -disallow_untyped_defs = True -disallow_untyped_calls = True -disallow_subclassing_any = True -disallow_incomplete_defs = True -disallow_untyped_decorators = True -cache_fine_grained = True - -# By default, mypy reports an error if you assign a value to the result -# of a function call that doesn't return anything. We do this in our test -# cases: -# ``` -# result = ... -# assert result is None -# ``` -# Changing this codegen to make mypy happy would increase complexity -# and would not be worth it. -disable_error_code = func-returns-value,overload-cannot-match - -# https://github.com/python/mypy/issues/12162 -[mypy.overrides] -module = "black.files.*" -ignore_errors = true -ignore_missing_imports = true diff --git a/pyproject.toml b/pyproject.toml index 595bb661..6d4723ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -157,6 +157,58 @@ reportOverlappingOverload = false reportImportCycles = false reportPrivateUsage = false +[tool.mypy] +pretty = true +show_error_codes = true + +# Exclude _files.py because mypy isn't smart enough to apply +# the correct type narrowing and as this is an internal module +# it's fine to just use Pyright. +# +# We also exclude our `tests` as mypy doesn't always infer +# types correctly and Pyright will still catch any type errors. +exclude = ['src/gradient/_files.py', '_dev/.*.py', 'tests/.*'] + +strict_equality = true +implicit_reexport = true +check_untyped_defs = true +no_implicit_optional = true + +warn_return_any = true +warn_unreachable = true +warn_unused_configs = true + +# Turn these options off as it could cause conflicts +# with the Pyright options. 
+warn_unused_ignores = false +warn_redundant_casts = false + +disallow_any_generics = true +disallow_untyped_defs = true +disallow_untyped_calls = true +disallow_subclassing_any = true +disallow_incomplete_defs = true +disallow_untyped_decorators = true +cache_fine_grained = true + +# By default, mypy reports an error if you assign a value to the result +# of a function call that doesn't return anything. We do this in our test +# cases: +# ``` +# result = ... +# assert result is None +# ``` +# Changing this codegen to make mypy happy would increase complexity +# and would not be worth it. +disable_error_code = "func-returns-value,overload-cannot-match" + +# https://github.com/python/mypy/issues/12162 +[[tool.mypy.overrides]] +module = "black.files.*" +ignore_errors = true +ignore_missing_imports = true + + [tool.ruff] line-length = 120 output-format = "grouped" From 3690a671ee1e082d3de1de8c1e98558cec10bd78 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 6 Sep 2025 05:16:48 +0000 Subject: [PATCH 164/200] chore(tests): simplify `get_platform` test `nest_asyncio` is archived and broken on some platforms so it's not worth keeping in our test suite. --- pyproject.toml | 1 - requirements-dev.lock | 1 - tests/test_client.py | 53 +++++-------------------------------------- 3 files changed, 6 insertions(+), 49 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6d4723ec..6d9f6527 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,7 +56,6 @@ dev-dependencies = [ "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "rich>=13.7.1", - "nest_asyncio==1.6.0", "pytest-xdist>=3.6.1", ] diff --git a/requirements-dev.lock b/requirements-dev.lock index 7a0f60ab..af44e06b 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -75,7 +75,6 @@ multidict==6.4.4 mypy==1.14.1 mypy-extensions==1.0.0 # via mypy -nest-asyncio==1.6.0 nodeenv==1.8.0 # via pyright nox==2023.4.22 diff --git a/tests/test_client.py b/tests/test_client.py index e21ee831..ed0d0180 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -6,13 +6,10 @@ import os import sys import json -import time import asyncio import inspect -import subprocess import tracemalloc from typing import Any, Union, cast -from textwrap import dedent from unittest import mock from typing_extensions import Literal @@ -23,6 +20,7 @@ from gradient import Gradient, AsyncGradient, APIResponseValidationError from gradient._types import Omit +from gradient._utils import asyncify from gradient._models import BaseModel, FinalRequestOptions from gradient._streaming import Stream, AsyncStream from gradient._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError @@ -30,8 +28,10 @@ DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + OtherPlatform, DefaultHttpxClient, DefaultAsyncHttpxClient, + get_platform, make_request_options, ) @@ -2031,50 +2031,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" - def test_get_platform(self) -> None: - # A previous implementation of asyncify could leave threads unterminated when - # used with nest_asyncio. - # - # Since nest_asyncio.apply() is global and cannot be un-applied, this - # test is run in a separate process to avoid affecting other tests. 
- test_code = dedent(""" - import asyncio - import nest_asyncio - import threading - - from gradient._utils import asyncify - from gradient._base_client import get_platform - - async def test_main() -> None: - result = await asyncify(get_platform)() - print(result) - for thread in threading.enumerate(): - print(thread.name) - - nest_asyncio.apply() - asyncio.run(test_main()) - """) - with subprocess.Popen( - [sys.executable, "-c", test_code], - text=True, - ) as process: - timeout = 10 # seconds - - start_time = time.monotonic() - while True: - return_code = process.poll() - if return_code is not None: - if return_code != 0: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") - - # success - break - - if time.monotonic() - start_time > timeout: - process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") - - time.sleep(0.1) + async def test_get_platform(self) -> None: + platform = await asyncify(get_platform)() + assert isinstance(platform, (str, OtherPlatform)) async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: # Test that the proxy environment variables are set correctly From 56bcaafa936f4e6424f067e49b213fd56e77e8d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 18:02:43 +0000 Subject: [PATCH 165/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2ce88448..9dcd5cc8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.4" + ".": "3.0.0-beta.5" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 6d9f6527..5d84c1c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.4" +version = "3.0.0-beta.5" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 428a5fa9..c7adeab4 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.0.0-beta.4" # x-release-please-version +__version__ = "3.0.0-beta.5" # x-release-please-version From 0c31327019ffa5659cadad7d8994fd29b6701078 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 12 Sep 2025 15:29:45 +0000 Subject: [PATCH 166/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4a621094..512aabaf 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 6c8d569b60ae6536708a165b72ff838f +config_hash: 08281b73cbc4aa830d1fa79914dc79fe From 4aa80284dc1141da630272e7d8efb5c2f3e91420 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 12 Sep 2025 15:30:12 +0000 Subject: [PATCH 167/200] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 512aabaf..df2d2abe 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 08281b73cbc4aa830d1fa79914dc79fe +config_hash: 6e3aacb34562b5a1ef74bb279f55cbec From a9e3a086ae8ed7bdbbcbecd336bec907997f8f8a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 12 Sep 2025 15:53:44 +0000 Subject: [PATCH 168/200] feat(api): enable typescript --- .stats.yml | 4 +- api.md | 26 ++ src/gradient/resources/agents/agents.py | 120 +++++++++ .../agents/evaluation_metrics/__init__.py | 14 ++ .../evaluation_metrics/evaluation_metrics.py | 32 +++ .../evaluation_metrics/oauth2/__init__.py | 33 +++ .../evaluation_metrics/oauth2/dropbox.py | 193 +++++++++++++++ .../evaluation_metrics/oauth2/oauth2.py | 229 ++++++++++++++++++ src/gradient/types/__init__.py | 2 + .../types/agent_retrieve_usage_params.py | 18 ++ .../types/agent_retrieve_usage_response.py | 48 ++++ .../agents/evaluation_metrics/__init__.py | 2 + .../evaluation_metrics/oauth2/__init__.py | 6 + .../oauth2/dropbox_create_tokens_params.py | 15 ++ .../oauth2/dropbox_create_tokens_response.py | 15 ++ .../oauth2_generate_url_params.py | 15 ++ .../oauth2_generate_url_response.py | 12 + .../evaluation_metrics/oauth2/__init__.py | 1 + .../evaluation_metrics/oauth2/test_dropbox.py | 100 ++++++++ .../agents/evaluation_metrics/test_oauth2.py | 98 ++++++++ tests/api_resources/test_agents.py | 105 ++++++++ 21 files changed, 1086 insertions(+), 2 deletions(-) create mode 100644 src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py create mode 100644 src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py create mode 100644 src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py create mode 100644 src/gradient/types/agent_retrieve_usage_params.py create mode 100644 src/gradient/types/agent_retrieve_usage_response.py create mode 100644 src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py create mode 100644 
src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py create mode 100644 src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py create mode 100644 src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py create mode 100644 src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py create mode 100644 tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py create mode 100644 tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py create mode 100644 tests/api_resources/agents/evaluation_metrics/test_oauth2.py diff --git a/.stats.yml b/.stats.yml index df2d2abe..e30c19b7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 170 +configured_endpoints: 173 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 6e3aacb34562b5a1ef74bb279f55cbec +config_hash: 3d425c415b7f7ab581418b43eb521cb3 diff --git a/api.md b/api.md index 1091e4dc..7299b3c6 100644 --- a/api.md +++ b/api.md @@ -51,6 +51,7 @@ from gradient.types import ( AgentUpdateResponse, AgentListResponse, AgentDeleteResponse, + AgentRetrieveUsageResponse, AgentUpdateStatusResponse, ) ``` @@ -62,6 +63,7 @@ Methods: - client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse - client.agents.list(\*\*params) -> AgentListResponse - client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.retrieve_usage(uuid, \*\*params) -> AgentRetrieveUsageResponse - client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys @@ -214,6 +216,30 @@ Methods: - client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse - client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse +### Oauth2 + +Types: + +```python +from gradient.types.agents.evaluation_metrics import Oauth2GenerateURLResponse +``` + +Methods: + +- client.agents.evaluation_metrics.oauth2.generate_url(\*\*params) -> Oauth2GenerateURLResponse + +#### Dropbox + +Types: + +```python +from gradient.types.agents.evaluation_metrics.oauth2 import DropboxCreateTokensResponse +``` + +Methods: + +- client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(\*\*params) -> DropboxCreateTokensResponse + ## EvaluationRuns Types: diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py index 8d06584c..590b9a76 100644 --- a/src/gradient/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -19,6 +19,7 @@ agent_create_params, agent_update_params, agent_update_status_params, + agent_retrieve_usage_params, ) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform @@ -103,6 +104,7 @@ from ...types.agent_retrieve_response import AgentRetrieveResponse from ...types.api_deployment_visibility import APIDeploymentVisibility from ...types.agent_update_status_response import AgentUpdateStatusResponse +from ...types.agent_retrieve_usage_response import AgentRetrieveUsageResponse from .evaluation_metrics.evaluation_metrics import ( EvaluationMetricsResource, AsyncEvaluationMetricsResource, @@ -500,6 +502,59 @@ def delete( cast_to=AgentDeleteResponse, ) + def retrieve_usage( + self, + uuid: str, + *, + start: str | NotGiven = NOT_GIVEN, + stop: str 
| NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveUsageResponse: + """ + To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`. + Returns usage metrics for the specified agent within the provided time range. + + Args: + start: Return all usage data from this date. + + stop: Return all usage data up to this date, if omitted, will return up to the current + date. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{uuid}/usage" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/usage", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "start": start, + "stop": stop, + }, + agent_retrieve_usage_params.AgentRetrieveUsageParams, + ), + ), + cast_to=AgentRetrieveUsageResponse, + ) + def update_status( self, path_uuid: str, @@ -943,6 +998,59 @@ async def delete( cast_to=AgentDeleteResponse, ) + async def retrieve_usage( + self, + uuid: str, + *, + start: str | NotGiven = NOT_GIVEN, + stop: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveUsageResponse: + """ + To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`. + Returns usage metrics for the specified agent within the provided time range. + + Args: + start: Return all usage data from this date. + + stop: Return all usage data up to this date, if omitted, will return up to the current + date. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}/usage" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/usage", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "start": start, + "stop": stop, + }, + agent_retrieve_usage_params.AgentRetrieveUsageParams, + ), + ), + cast_to=AgentRetrieveUsageResponse, + ) + async def update_status( self, path_uuid: str, @@ -1020,6 +1128,9 @@ def __init__(self, agents: AgentsResource) -> None: self.delete = to_raw_response_wrapper( agents.delete, ) + self.retrieve_usage = to_raw_response_wrapper( + agents.retrieve_usage, + ) self.update_status = to_raw_response_wrapper( agents.update_status, ) @@ -1084,6 +1195,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.delete = async_to_raw_response_wrapper( agents.delete, ) + self.retrieve_usage = async_to_raw_response_wrapper( + agents.retrieve_usage, + ) self.update_status = async_to_raw_response_wrapper( agents.update_status, ) @@ -1148,6 +1262,9 @@ def __init__(self, agents: AgentsResource) -> None: self.delete = to_streamed_response_wrapper( agents.delete, ) + self.retrieve_usage = to_streamed_response_wrapper( + agents.retrieve_usage, + ) self.update_status = to_streamed_response_wrapper( agents.update_status, ) @@ -1212,6 +1329,9 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.delete = async_to_streamed_response_wrapper( agents.delete, ) + self.retrieve_usage = async_to_streamed_response_wrapper( + agents.retrieve_usage, + ) self.update_status = async_to_streamed_response_wrapper( agents.update_status, ) diff --git a/src/gradient/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py index 92449820..31e2f93b 100644 --- a/src/gradient/resources/agents/evaluation_metrics/__init__.py +++ b/src/gradient/resources/agents/evaluation_metrics/__init__.py @@ -8,6 +8,14 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) +from .oauth2 import ( + Oauth2Resource, + AsyncOauth2Resource, + Oauth2ResourceWithRawResponse, + AsyncOauth2ResourceWithRawResponse, + Oauth2ResourceWithStreamingResponse, + AsyncOauth2ResourceWithStreamingResponse, +) from .openai import ( OpenAIResource, AsyncOpenAIResource, @@ -66,6 +74,12 @@ "AsyncOpenAIResourceWithRawResponse", "OpenAIResourceWithStreamingResponse", "AsyncOpenAIResourceWithStreamingResponse", + "Oauth2Resource", + "AsyncOauth2Resource", + "Oauth2ResourceWithRawResponse", + "AsyncOauth2ResourceWithRawResponse", + "Oauth2ResourceWithStreamingResponse", + "AsyncOauth2ResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py index f6453d4d..b9e1386b 100644 --- a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py +++ 
b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -22,6 +22,14 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .oauth2.oauth2 import ( + Oauth2Resource, + AsyncOauth2Resource, + Oauth2ResourceWithRawResponse, + AsyncOauth2ResourceWithRawResponse, + Oauth2ResourceWithStreamingResponse, + AsyncOauth2ResourceWithStreamingResponse, +) from .openai.openai import ( OpenAIResource, AsyncOpenAIResource, @@ -71,6 +79,10 @@ def anthropic(self) -> AnthropicResource: def openai(self) -> OpenAIResource: return OpenAIResource(self._client) + @cached_property + def oauth2(self) -> Oauth2Resource: + return Oauth2Resource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -180,6 +192,10 @@ def anthropic(self) -> AsyncAnthropicResource: def openai(self) -> AsyncOpenAIResource: return AsyncOpenAIResource(self._client) + @cached_property + def oauth2(self) -> AsyncOauth2Resource: + return AsyncOauth2Resource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -299,6 +315,10 @@ def anthropic(self) -> AnthropicResourceWithRawResponse: def openai(self) -> OpenAIResourceWithRawResponse: return OpenAIResourceWithRawResponse(self._evaluation_metrics.openai) + @cached_property + def oauth2(self) -> Oauth2ResourceWithRawResponse: + return Oauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -327,6 +347,10 @@ def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: def openai(self) -> AsyncOpenAIResourceWithRawResponse: return AsyncOpenAIResourceWithRawResponse(self._evaluation_metrics.openai) + @cached_property + def oauth2(self) -> AsyncOauth2ResourceWithRawResponse: + return AsyncOauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -355,6 +379,10 @@ def anthropic(self) -> AnthropicResourceWithStreamingResponse: def openai(self) -> OpenAIResourceWithStreamingResponse: return OpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai) + @cached_property + def oauth2(self) -> Oauth2ResourceWithStreamingResponse: + return Oauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -382,3 +410,7 @@ def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: @cached_property def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: return AsyncOpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai) + + @cached_property + def oauth2(self) -> AsyncOauth2ResourceWithStreamingResponse: + return AsyncOauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2) diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py new file mode 100644 index 00000000..c74ddfe8 --- /dev/null +++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .oauth2 import ( + Oauth2Resource, + AsyncOauth2Resource, + Oauth2ResourceWithRawResponse, + AsyncOauth2ResourceWithRawResponse, + Oauth2ResourceWithStreamingResponse, + AsyncOauth2ResourceWithStreamingResponse, +) +from .dropbox import ( + DropboxResource, + AsyncDropboxResource, + DropboxResourceWithRawResponse, + AsyncDropboxResourceWithRawResponse, + DropboxResourceWithStreamingResponse, + AsyncDropboxResourceWithStreamingResponse, +) + +__all__ = [ + "DropboxResource", + "AsyncDropboxResource", + "DropboxResourceWithRawResponse", + "AsyncDropboxResourceWithRawResponse", + "DropboxResourceWithStreamingResponse", + "AsyncDropboxResourceWithStreamingResponse", + "Oauth2Resource", + "AsyncOauth2Resource", + "Oauth2ResourceWithRawResponse", + "AsyncOauth2ResourceWithRawResponse", + "Oauth2ResourceWithStreamingResponse", + "AsyncOauth2ResourceWithStreamingResponse", +] diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py new file mode 100644 index 00000000..caa7d0d3 --- /dev/null +++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py @@ -0,0 +1,193 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics.oauth2 import dropbox_create_tokens_params +from .....types.agents.evaluation_metrics.oauth2.dropbox_create_tokens_response import DropboxCreateTokensResponse + +__all__ = ["DropboxResource", "AsyncDropboxResource"] + + +class DropboxResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DropboxResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return DropboxResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DropboxResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return DropboxResourceWithStreamingResponse(self) + + def create_tokens( + self, + *, + code: str | NotGiven = NOT_GIVEN, + redirect_url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DropboxCreateTokensResponse: + """ + To obtain the refresh token needed for creating data sources, send a POST + request to `/v2/gen-ai/oauth2/dropbox/tokens`.
Pass the code you obtained from + the OAuth flow in the 'code' field. + + Args: + code: The OAuth2 code from the Dropbox OAuth flow + + redirect_url: The redirect URL + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/oauth2/dropbox/tokens" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/oauth2/dropbox/tokens", + body=maybe_transform( + { + "code": code, + "redirect_url": redirect_url, + }, + dropbox_create_tokens_params.DropboxCreateTokensParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DropboxCreateTokensResponse, + ) + + +class AsyncDropboxResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDropboxResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncDropboxResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDropboxResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncDropboxResourceWithStreamingResponse(self) + + async def create_tokens( + self, + *, + code: str | NotGiven = NOT_GIVEN, + redirect_url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DropboxCreateTokensResponse: + """ + To obtain the refresh token needed for creating data sources, send a POST + request to `/v2/gen-ai/oauth2/dropbox/tokens`.
Pass the code you obtained from + the OAuth flow in the 'code' field. + + Args: + code: The OAuth2 code from the Dropbox OAuth flow + + redirect_url: The redirect URL + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/oauth2/dropbox/tokens" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/oauth2/dropbox/tokens", + body=await async_maybe_transform( + { + "code": code, + "redirect_url": redirect_url, + }, + dropbox_create_tokens_params.DropboxCreateTokensParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DropboxCreateTokensResponse, + ) + + +class DropboxResourceWithRawResponse: + def __init__(self, dropbox: DropboxResource) -> None: + self._dropbox = dropbox + + self.create_tokens = to_raw_response_wrapper( + dropbox.create_tokens, + ) + + +class AsyncDropboxResourceWithRawResponse: + def __init__(self, dropbox: AsyncDropboxResource) -> None: + self._dropbox = dropbox + + self.create_tokens = async_to_raw_response_wrapper( + dropbox.create_tokens, + ) + + +class DropboxResourceWithStreamingResponse: + def __init__(self, dropbox: DropboxResource) -> None: + self._dropbox = dropbox + + self.create_tokens = to_streamed_response_wrapper( + dropbox.create_tokens, + ) + + +class AsyncDropboxResourceWithStreamingResponse: + def __init__(self, dropbox: AsyncDropboxResource) -> None: + self._dropbox = dropbox + + self.create_tokens = async_to_streamed_response_wrapper( + dropbox.create_tokens, + ) diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py new file mode 100644 index 00000000..8063ce5a --- /dev/null +++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py @@ -0,0 +1,229 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .dropbox import ( + DropboxResource, + AsyncDropboxResource, + DropboxResourceWithRawResponse, + AsyncDropboxResourceWithRawResponse, + DropboxResourceWithStreamingResponse, + AsyncDropboxResourceWithStreamingResponse, +) +from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._utils import maybe_transform, async_maybe_transform +from ....._compat import cached_property +from ....._resource import SyncAPIResource, AsyncAPIResource +from ....._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ....._base_client import make_request_options +from .....types.agents.evaluation_metrics import oauth2_generate_url_params +from .....types.agents.evaluation_metrics.oauth2_generate_url_response import Oauth2GenerateURLResponse + +__all__ = ["Oauth2Resource", "AsyncOauth2Resource"] + + +class Oauth2Resource(SyncAPIResource): + @cached_property + def dropbox(self) -> DropboxResource: + return DropboxResource(self._client) + + @cached_property + def with_raw_response(self) -> Oauth2ResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return Oauth2ResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> Oauth2ResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return Oauth2ResourceWithStreamingResponse(self) + + def generate_url( + self, + *, + redirect_url: str | NotGiven = NOT_GIVEN, + type: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Oauth2GenerateURLResponse: + """ + To generate an OAuth2 URL for use with your localhost, send a GET request to + `/v2/gen-ai/oauth2/url`. Pass `http://localhost:3000` as the redirect_url. + + Args: + redirect_url: The redirect URL. + + type: The provider type, either "google" or "dropbox". + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/oauth2/url" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/oauth2/url", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "redirect_url": redirect_url, + "type": type, + }, + oauth2_generate_url_params.Oauth2GenerateURLParams, + ), + ), + cast_to=Oauth2GenerateURLResponse, + ) + + +class AsyncOauth2Resource(AsyncAPIResource): + @cached_property + def dropbox(self) -> AsyncDropboxResource: + return AsyncDropboxResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOauth2ResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncOauth2ResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOauth2ResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncOauth2ResourceWithStreamingResponse(self) + + async def generate_url( + self, + *, + redirect_url: str | NotGiven = NOT_GIVEN, + type: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Oauth2GenerateURLResponse: + """ + To generate an OAuth2 URL for use with your localhost, send a GET request to + `/v2/gen-ai/oauth2/url`. Pass `http://localhost:3000` as the redirect_url. + + Args: + redirect_url: The redirect URL. + + type: The provider type, either "google" or "dropbox". + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/oauth2/url" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/oauth2/url", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "redirect_url": redirect_url, + "type": type, + }, + oauth2_generate_url_params.Oauth2GenerateURLParams, + ), + ), + cast_to=Oauth2GenerateURLResponse, + ) + + +class Oauth2ResourceWithRawResponse: + def __init__(self, oauth2: Oauth2Resource) -> None: + self._oauth2 = oauth2 + + self.generate_url = to_raw_response_wrapper( + oauth2.generate_url, + ) + + @cached_property + def dropbox(self) -> DropboxResourceWithRawResponse: + return DropboxResourceWithRawResponse(self._oauth2.dropbox) + + +class AsyncOauth2ResourceWithRawResponse: + def __init__(self, oauth2: AsyncOauth2Resource) -> None: + self._oauth2 = oauth2 + + self.generate_url = async_to_raw_response_wrapper( + oauth2.generate_url, + ) + + @cached_property + def dropbox(self) -> AsyncDropboxResourceWithRawResponse: + return AsyncDropboxResourceWithRawResponse(self._oauth2.dropbox) + + +class Oauth2ResourceWithStreamingResponse: + def __init__(self, oauth2: Oauth2Resource) -> None: + self._oauth2 = oauth2 + + self.generate_url = to_streamed_response_wrapper( + oauth2.generate_url, + ) + + @cached_property + def dropbox(self) -> DropboxResourceWithStreamingResponse: + return DropboxResourceWithStreamingResponse(self._oauth2.dropbox) + + +class AsyncOauth2ResourceWithStreamingResponse: + def __init__(self, oauth2: AsyncOauth2Resource) -> None: + self._oauth2 = oauth2 + + self.generate_url = async_to_streamed_response_wrapper( + oauth2.generate_url, + ) + + @cached_property + def dropbox(self) -> AsyncDropboxResourceWithStreamingResponse: + return AsyncDropboxResourceWithStreamingResponse(self._oauth2.dropbox) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index a699a714..42faaff6 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -73,12 +73,14 @@ from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .agent_retrieve_usage_params import AgentRetrieveUsageParams as AgentRetrieveUsageParams from .droplet_backup_policy_param import DropletBackupPolicyParam as DropletBackupPolicyParam from .gpu_droplet_create_response import GPUDropletCreateResponse as GPUDropletCreateResponse from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as
KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .agent_retrieve_usage_response import AgentRetrieveUsageResponse as AgentRetrieveUsageResponse from .gpu_droplet_retrieve_response import GPUDropletRetrieveResponse as GPUDropletRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse diff --git a/src/gradient/types/agent_retrieve_usage_params.py b/src/gradient/types/agent_retrieve_usage_params.py new file mode 100644 index 00000000..f5471151 --- /dev/null +++ b/src/gradient/types/agent_retrieve_usage_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AgentRetrieveUsageParams"] + + +class AgentRetrieveUsageParams(TypedDict, total=False): + start: str + """Return all usage data from this date.""" + + stop: str + """ + Return all usage data up to this date. If omitted, usage data up to the current + date is returned. + """ diff --git a/src/gradient/types/agent_retrieve_usage_response.py b/src/gradient/types/agent_retrieve_usage_response.py new file mode 100644 index 00000000..1d65addd --- /dev/null +++ b/src/gradient/types/agent_retrieve_usage_response.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["AgentRetrieveUsageResponse", "LogInsightsUsage", "LogInsightsUsageMeasurement", "Usage", "UsageMeasurement"] + + +class LogInsightsUsageMeasurement(BaseModel): + tokens: Optional[int] = None + + usage_type: Optional[str] = None + + +class LogInsightsUsage(BaseModel): + measurements: Optional[List[LogInsightsUsageMeasurement]] = None + + resource_uuid: Optional[str] = None + + start: Optional[datetime] = None + + stop: Optional[datetime] = None + + +class UsageMeasurement(BaseModel): + tokens: Optional[int] = None + + usage_type: Optional[str] = None + + +class Usage(BaseModel): + measurements: Optional[List[UsageMeasurement]] = None + + resource_uuid: Optional[str] = None + + start: Optional[datetime] = None + + stop: Optional[datetime] = None + + +class AgentRetrieveUsageResponse(BaseModel): + log_insights_usage: Optional[LogInsightsUsage] = None + """Resource Usage Description""" + + usage: Optional[Usage] = None + """Resource Usage Description""" diff --git a/src/gradient/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py index c349624b..95d81dd2 100644 --- a/src/gradient/types/agents/evaluation_metrics/__init__.py +++ b/src/gradient/types/agents/evaluation_metrics/__init__.py @@ -10,7 +10,9 @@ from .workspace_create_response import WorkspaceCreateResponse as WorkspaceCreateResponse from .workspace_delete_response import WorkspaceDeleteResponse as WorkspaceDeleteResponse from .workspace_update_response import WorkspaceUpdateResponse as WorkspaceUpdateResponse +from .oauth2_generate_url_params import Oauth2GenerateURLParams as Oauth2GenerateURLParams from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse +from .oauth2_generate_url_response import
Oauth2GenerateURLResponse as Oauth2GenerateURLResponse from .workspace_list_evaluation_test_cases_response import ( WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse, ) diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py b/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py new file mode 100644 index 00000000..e686ce35 --- /dev/null +++ b/src/gradient/types/agents/evaluation_metrics/oauth2/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .dropbox_create_tokens_params import DropboxCreateTokensParams as DropboxCreateTokensParams +from .dropbox_create_tokens_response import DropboxCreateTokensResponse as DropboxCreateTokensResponse diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py new file mode 100644 index 00000000..00d22cce --- /dev/null +++ b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["DropboxCreateTokensParams"] + + +class DropboxCreateTokensParams(TypedDict, total=False): + code: str + """The OAuth2 code from the Dropbox OAuth flow""" + + redirect_url: str + """The redirect URL""" diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py new file mode 100644 index 00000000..816b89f4 --- /dev/null +++ b/src/gradient/types/agents/evaluation_metrics/oauth2/dropbox_create_tokens_response.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ....._models import BaseModel + +__all__ = ["DropboxCreateTokensResponse"] + + +class DropboxCreateTokensResponse(BaseModel): + token: Optional[str] = None + """The access token""" + + refresh_token: Optional[str] = None + """The refresh token""" diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py new file mode 100644 index 00000000..68924774 --- /dev/null +++ b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["Oauth2GenerateURLParams"] + + +class Oauth2GenerateURLParams(TypedDict, total=False): + redirect_url: str + """The redirect URL.""" + + type: str + """The provider type, either "google" or "dropbox".""" diff --git a/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py new file mode 100644 index 00000000..8be21b8a --- /dev/null +++ b/src/gradient/types/agents/evaluation_metrics/oauth2_generate_url_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
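Taken together, `generate_url` and `dropbox.create_tokens` form a small flow: generate the provider's authorization URL, let the user approve, then trade the returned code for tokens. A hedged end-to-end sketch (editorial, not generated code; the localhost redirect and the placeholder code value are illustrative):

```py
from gradient import Gradient

client = Gradient()

# 1. Ask the API for the provider authorization URL (GET /v2/gen-ai/oauth2/url).
url_response = client.agents.evaluation_metrics.oauth2.generate_url(
    redirect_url="http://localhost:3000",
    type="dropbox",
)
print("Authorize here:", url_response.url)

# 2. The provider redirects back to redirect_url with a one-time code; exchange
#    it for tokens (POST /v2/gen-ai/oauth2/dropbox/tokens).
tokens = client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(
    code="<code-from-redirect>",  # placeholder supplied by the OAuth redirect
    redirect_url="http://localhost:3000",
)

# The refresh token is what data source creation needs.
print(tokens.refresh_token)
```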
+ +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["Oauth2GenerateURLResponse"] + + +class Oauth2GenerateURLResponse(BaseModel): + url: Optional[str] = None + """The oauth2 url""" diff --git a/tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py b/tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/oauth2/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py b/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py new file mode 100644 index 00000000..417bb3b1 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/oauth2/test_dropbox.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types.agents.evaluation_metrics.oauth2 import DropboxCreateTokensResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDropbox: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_tokens(self, client: Gradient) -> None: + dropbox = client.agents.evaluation_metrics.oauth2.dropbox.create_tokens() + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_tokens_with_all_params(self, client: Gradient) -> None: + dropbox = client.agents.evaluation_metrics.oauth2.dropbox.create_tokens( + code="example string", + redirect_url="example string", + ) + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_tokens(self, client: Gradient) -> None: + response = client.agents.evaluation_metrics.oauth2.dropbox.with_raw_response.create_tokens() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dropbox = response.parse() + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_tokens(self, client: Gradient) -> None: + with client.agents.evaluation_metrics.oauth2.dropbox.with_streaming_response.create_tokens() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dropbox = response.parse() + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncDropbox: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_tokens(self, async_client: AsyncGradient) -> None: + dropbox = await async_client.agents.evaluation_metrics.oauth2.dropbox.create_tokens() + 
assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_tokens_with_all_params(self, async_client: AsyncGradient) -> None: + dropbox = await async_client.agents.evaluation_metrics.oauth2.dropbox.create_tokens( + code="example string", + redirect_url="example string", + ) + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_tokens(self, async_client: AsyncGradient) -> None: + response = await async_client.agents.evaluation_metrics.oauth2.dropbox.with_raw_response.create_tokens() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + dropbox = await response.parse() + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_tokens(self, async_client: AsyncGradient) -> None: + async with ( + async_client.agents.evaluation_metrics.oauth2.dropbox.with_streaming_response.create_tokens() + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + dropbox = await response.parse() + assert_matches_type(DropboxCreateTokensResponse, dropbox, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/agents/evaluation_metrics/test_oauth2.py b/tests/api_resources/agents/evaluation_metrics/test_oauth2.py new file mode 100644 index 00000000..f247d94f --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/test_oauth2.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types.agents.evaluation_metrics import Oauth2GenerateURLResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestOauth2: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_generate_url(self, client: Gradient) -> None: + oauth2 = client.agents.evaluation_metrics.oauth2.generate_url() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_generate_url_with_all_params(self, client: Gradient) -> None: + oauth2 = client.agents.evaluation_metrics.oauth2.generate_url( + redirect_url="redirect_url", + type="type", + ) + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_generate_url(self, client: Gradient) -> None: + response = client.agents.evaluation_metrics.oauth2.with_raw_response.generate_url() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + oauth2 = response.parse() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_generate_url(self, client: Gradient) -> None: + with client.agents.evaluation_metrics.oauth2.with_streaming_response.generate_url() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + oauth2 = response.parse() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncOauth2: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_generate_url(self, async_client: AsyncGradient) -> None: + oauth2 = await async_client.agents.evaluation_metrics.oauth2.generate_url() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_generate_url_with_all_params(self, async_client: AsyncGradient) -> None: + oauth2 = await async_client.agents.evaluation_metrics.oauth2.generate_url( + redirect_url="redirect_url", + type="type", + ) + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_generate_url(self, async_client: AsyncGradient) -> None: + response = await async_client.agents.evaluation_metrics.oauth2.with_raw_response.generate_url() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + oauth2 = await response.parse() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_generate_url(self, async_client: AsyncGradient) -> 
None: + async with async_client.agents.evaluation_metrics.oauth2.with_streaming_response.generate_url() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + oauth2 = await response.parse() + assert_matches_type(Oauth2GenerateURLResponse, oauth2, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index dd4dbdc4..6d040d18 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -16,6 +16,7 @@ AgentUpdateResponse, AgentRetrieveResponse, AgentUpdateStatusResponse, + AgentRetrieveUsageResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -258,6 +259,58 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve_usage(self, client: Gradient) -> None: + agent = client.agents.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve_usage_with_all_params(self, client: Gradient) -> None: + agent = client.agents.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + start="start", + stop="stop", + ) + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_retrieve_usage(self, client: Gradient) -> None: + response = client.agents.with_raw_response.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_retrieve_usage(self, client: Gradient) -> None: + with client.agents.with_streaming_response.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_retrieve_usage(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.with_raw_response.retrieve_usage( + uuid="", + ) + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_status(self, client: Gradient) -> None: @@ -550,6 +603,58 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: "", ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_retrieve_usage(self, async_client: AsyncGradient) -> None: + agent = await async_client.agents.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_retrieve_usage_with_all_params(self, 
async_client: AsyncGradient) -> None: + agent = await async_client.agents.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + start="start", + stop="stop", + ) + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_retrieve_usage(self, async_client: AsyncGradient) -> None: + response = await async_client.agents.with_raw_response.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_retrieve_usage(self, async_client: AsyncGradient) -> None: + async with async_client.agents.with_streaming_response.retrieve_usage( + uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentRetrieveUsageResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_retrieve_usage(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.with_raw_response.retrieve_usage( + uuid="", + ) + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_status(self, async_client: AsyncGradient) -> None: From 0b5aacd84a34898ce8a076ead67275233ca3d2f2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 03:17:20 +0000 Subject: [PATCH 169/200] chore(internal): update pydantic dependency --- requirements-dev.lock | 7 +++++-- requirements.lock | 7 +++++-- src/gradient/_models.py | 14 ++++++++++---- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index af44e06b..896c8c3a 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -88,9 +88,9 @@ pluggy==1.5.0 propcache==0.3.1 # via aiohttp # via yarl -pydantic==2.10.3 +pydantic==2.11.9 # via gradient -pydantic-core==2.27.1 +pydantic-core==2.33.2 # via pydantic pygments==2.18.0 # via rich @@ -126,6 +126,9 @@ typing-extensions==4.12.2 # via pydantic # via pydantic-core # via pyright + # via typing-inspection +typing-inspection==0.4.1 + # via pydantic virtualenv==20.24.5 # via nox yarl==1.20.0 diff --git a/requirements.lock b/requirements.lock index f9072669..1fce47a6 100644 --- a/requirements.lock +++ b/requirements.lock @@ -55,9 +55,9 @@ multidict==6.4.4 propcache==0.3.1 # via aiohttp # via yarl -pydantic==2.10.3 +pydantic==2.11.9 # via gradient -pydantic-core==2.27.1 +pydantic-core==2.33.2 # via pydantic sniffio==1.3.0 # via anyio @@ -68,5 +68,8 @@ typing-extensions==4.12.2 # via multidict # via pydantic # via pydantic-core + # via typing-inspection +typing-inspection==0.4.1 + # via pydantic yarl==1.20.0 # via aiohttp diff --git a/src/gradient/_models.py b/src/gradient/_models.py index 3a6017ef..6a3cd1d2 100644 --- a/src/gradient/_models.py +++ b/src/gradient/_models.py @@ -256,7 +256,7 @@ def model_dump( mode: 
Literal["json", "python"] | str = "python", include: IncEx | None = None, exclude: IncEx | None = None, - by_alias: bool = False, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, @@ -264,6 +264,7 @@ def model_dump( warnings: bool | Literal["none", "warn", "error"] = True, context: dict[str, Any] | None = None, serialize_as_any: bool = False, + fallback: Callable[[Any], Any] | None = None, ) -> dict[str, Any]: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump @@ -295,10 +296,12 @@ def model_dump( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") dumped = super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, - by_alias=by_alias, + by_alias=by_alias if by_alias is not None else False, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, @@ -313,13 +316,14 @@ def model_dump_json( indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, - by_alias: bool = False, + by_alias: bool | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool | Literal["none", "warn", "error"] = True, context: dict[str, Any] | None = None, + fallback: Callable[[Any], Any] | None = None, serialize_as_any: bool = False, ) -> str: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json @@ -348,11 +352,13 @@ def model_dump_json( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") + if fallback is not None: + raise ValueError("fallback is only supported in Pydantic v2") return super().json( # type: ignore[reportDeprecated] indent=indent, include=include, exclude=exclude, - by_alias=by_alias, + by_alias=by_alias if by_alias is not None else False, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, From 073c145dc75ec670e020ca45393bf8df2754b0ca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 16:38:21 +0000 Subject: [PATCH 170/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9dcd5cc8..3c4dbee7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.5" + ".": "3.0.0-beta.6" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5d84c1c3..14a56060 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.5" +version = "3.0.0-beta.6" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index c7adeab4..81080cc3 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.0.0-beta.5" # x-release-please-version +__version__ = "3.0.0-beta.6" # x-release-please-version From 69032df16b45a5055f5a4bbfad9d51ce7277364b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 18 Sep 2025 17:55:21 +0000 Subject: [PATCH 171/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- README.md | 4 ++-- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3c4dbee7..4191c889 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.6" + ".": "3.0.0" } \ No newline at end of file diff --git a/README.md b/README.md index 8ae17135..2feb1f88 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ The REST API documentation can be found on [developers.digitalocean.com](https:/ ```sh # install from PyPI -pip install --pre gradient +pip install gradient ``` ## Usage @@ -93,7 +93,7 @@ You can enable this by installing `aiohttp`: ```sh # install from PyPI -pip install --pre gradient[aiohttp] +pip install gradient[aiohttp] ``` Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: diff --git a/pyproject.toml b/pyproject.toml index 14a56060..eed06559 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.6" +version = "3.0.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 81080cc3..17f05c22 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.0.0-beta.6" # x-release-please-version +__version__ = "3.0.0" # x-release-please-version From 86f9415907e35b927662d3ca425537f97109a307 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 19 Sep 2025 03:40:22 +0000 Subject: [PATCH 172/200] chore(types): change optional parameter type from NotGiven to Omit --- src/gradient/__init__.py | 4 +- src/gradient/_base_client.py | 18 +- src/gradient/_client.py | 16 +- src/gradient/_qs.py | 14 +- src/gradient/_types.py | 29 +- src/gradient/_utils/_transform.py | 4 +- src/gradient/_utils/_utils.py | 8 +- src/gradient/resources/agents/agents.py | 166 ++--- src/gradient/resources/agents/api_keys.py | 50 +- .../resources/agents/chat/completions.py | 282 ++++---- .../resources/agents/evaluation_datasets.py | 22 +- .../evaluation_metrics/anthropic/keys.py | 62 +- .../evaluation_metrics/evaluation_metrics.py | 18 +- .../agents/evaluation_metrics/models.py | 22 +- .../evaluation_metrics/oauth2/dropbox.py | 14 +- .../evaluation_metrics/oauth2/oauth2.py | 14 +- .../agents/evaluation_metrics/openai/keys.py | 62 +- .../evaluation_metrics/workspaces/agents.py | 30 +- .../workspaces/workspaces.py | 50 +- .../resources/agents/evaluation_runs.py | 38 +- .../resources/agents/evaluation_test_cases.py | 78 +-- src/gradient/resources/agents/functions.py | 74 +-- .../resources/agents/knowledge_bases.py | 14 +- src/gradient/resources/agents/routes.py | 54 +- src/gradient/resources/agents/versions.py | 26 +- src/gradient/resources/chat/completions.py | 282 ++++---- .../databases/schema_registry/config.py | 18 +- .../resources/gpu_droplets/account/keys.py | 34 +- .../resources/gpu_droplets/actions.py | 150 ++--- .../resources/gpu_droplets/autoscale.py | 62 +- .../resources/gpu_droplets/backups.py | 34 +- .../destroy_with_associated_resources.py | 42 +- .../gpu_droplets/firewalls/droplets.py | 10 +- .../gpu_droplets/firewalls/firewalls.py | 34 +- .../resources/gpu_droplets/firewalls/rules.py | 26 +- .../resources/gpu_droplets/firewalls/tags.py | 10 +- .../gpu_droplets/floating_ips/actions.py | 26 +- .../gpu_droplets/floating_ips/floating_ips.py | 50 +- .../resources/gpu_droplets/gpu_droplets.py | 242 +++---- .../resources/gpu_droplets/images/actions.py | 26 +- .../resources/gpu_droplets/images/images.py | 78 +-- .../gpu_droplets/load_balancers/droplets.py | 10 +- .../load_balancers/forwarding_rules.py | 10 +- .../load_balancers/load_balancers.py | 610 +++++++++--------- src/gradient/resources/gpu_droplets/sizes.py | 14 +- .../resources/gpu_droplets/snapshots.py | 26 +- .../resources/gpu_droplets/volumes/actions.py | 162 ++--- .../gpu_droplets/volumes/snapshots.py | 30 +- .../resources/gpu_droplets/volumes/volumes.py | 114 ++-- src/gradient/resources/inference/api_keys.py | 42 +- .../resources/knowledge_bases/data_sources.py | 38 +- .../knowledge_bases/indexing_jobs.py | 42 +- .../knowledge_bases/knowledge_bases.py | 86 +-- src/gradient/resources/models/models.py | 22 +- .../resources/models/providers/anthropic.py | 62 +- .../resources/models/providers/openai.py | 62 +- src/gradient/resources/regions.py | 14 +- tests/test_transform.py | 11 +- 58 files changed, 1832 insertions(+), 1816 deletions(-) diff --git a/src/gradient/__init__.py b/src/gradient/__init__.py index c78eff30..a67cd2a7 100644 --- a/src/gradient/__init__.py +++ b/src/gradient/__init__.py @@ -3,7 +3,7 @@ import typing as _t from . 
import types -from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given from ._utils import file_from_path from ._client import ( Client, @@ -48,7 +48,9 @@ "ProxiesTypes", "NotGiven", "NOT_GIVEN", + "not_given", "Omit", + "omit", "GradientError", "APIError", "APIStatusError", diff --git a/src/gradient/_base_client.py b/src/gradient/_base_client.py index 58b14617..7d92e5bc 100644 --- a/src/gradient/_base_client.py +++ b/src/gradient/_base_client.py @@ -42,7 +42,6 @@ from ._qs import Querystring from ._files import to_httpx_files, async_to_httpx_files from ._types import ( - NOT_GIVEN, Body, Omit, Query, @@ -57,6 +56,7 @@ RequestOptions, HttpxRequestFiles, ModelBuilderProtocol, + not_given, ) from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping from ._compat import PYDANTIC_V1, model_copy, model_dump @@ -145,9 +145,9 @@ def __init__( def __init__( self, *, - url: URL | NotGiven = NOT_GIVEN, - json: Body | NotGiven = NOT_GIVEN, - params: Query | NotGiven = NOT_GIVEN, + url: URL | NotGiven = not_given, + json: Body | NotGiven = not_given, + params: Query | NotGiven = not_given, ) -> None: self.url = url self.json = json @@ -595,7 +595,7 @@ def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalReques # we internally support defining a temporary header to override the # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response` # see _response.py for implementation details - override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN) + override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given) if is_given(override_cast_to): options.headers = headers return cast(Type[ResponseT], override_cast_to) @@ -825,7 +825,7 @@ def __init__( version: str, base_url: str | URL, max_retries: int = DEFAULT_MAX_RETRIES, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, @@ -1356,7 +1356,7 @@ def __init__( base_url: str | URL, _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.AsyncClient | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, @@ -1818,8 +1818,8 @@ def make_request_options( extra_query: Query | None = None, extra_body: Body | None = None, idempotency_key: str | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - post_parser: PostParser | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + post_parser: PostParser | NotGiven = not_given, ) -> RequestOptions: """Create a dict of type RequestOptions without keys of NotGiven values.""" options: RequestOptions = {} diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 5343a6eb..a215f629 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Union, Mapping +from typing import TYPE_CHECKING, Any, Mapping from typing_extensions import Self, override import httpx @@ -11,7 +11,6 @@ from . 
import _exceptions from ._qs import Querystring from ._types import ( - NOT_GIVEN, Omit, Headers, Timeout, @@ -19,6 +18,7 @@ Transport, ProxiesTypes, RequestOptions, + not_given, ) from ._utils import is_given, get_async_library from ._compat import cached_property @@ -71,7 +71,7 @@ def __init__( agent_endpoint: str | None = None, inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, - timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -235,9 +235,9 @@ def copy( agent_endpoint: str | None = None, inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.Client | None = None, - max_retries: int | NotGiven = NOT_GIVEN, + max_retries: int | NotGiven = not_given, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -338,7 +338,7 @@ def __init__( agent_endpoint: str | None = None, inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, - timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, @@ -502,9 +502,9 @@ def copy( agent_endpoint: str | None = None, inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, - timeout: float | Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | Timeout | None | NotGiven = not_given, http_client: httpx.AsyncClient | None = None, - max_retries: int | NotGiven = NOT_GIVEN, + max_retries: int | NotGiven = not_given, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, diff --git a/src/gradient/_qs.py b/src/gradient/_qs.py index 274320ca..ada6fd3f 100644 --- a/src/gradient/_qs.py +++ b/src/gradient/_qs.py @@ -4,7 +4,7 @@ from urllib.parse import parse_qs, urlencode from typing_extensions import Literal, get_args -from ._types import NOT_GIVEN, NotGiven, NotGivenOr +from ._types import NotGiven, not_given from ._utils import flatten _T = TypeVar("_T") @@ -41,8 +41,8 @@ def stringify( self, params: Params, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> str: return urlencode( self.stringify_items( @@ -56,8 +56,8 @@ def stringify_items( self, params: Params, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> list[tuple[str, str]]: opts = Options( qs=self, @@ -143,8 +143,8 @@ def __init__( self, qs: Querystring = _qs, *, - array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, - nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, + array_format: ArrayFormat | NotGiven = not_given, + nested_format: NestedFormat | NotGiven = not_given, ) -> None: self.array_format = qs.array_format if 
isinstance(array_format, NotGiven) else array_format self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format diff --git a/src/gradient/_types.py b/src/gradient/_types.py index 32375713..11a40997 100644 --- a/src/gradient/_types.py +++ b/src/gradient/_types.py @@ -117,18 +117,21 @@ class RequestOptions(TypedDict, total=False): # Sentinel class used until PEP 0661 is accepted class NotGiven: """ - A sentinel singleton class used to distinguish omitted keyword arguments - from those passed in with the value None (which may have different behavior). + For parameters with a meaningful None value, we need to distinguish between + the user explicitly passing None, and the user not passing the parameter at + all. + + User code shouldn't need to use not_given directly. For example: ```py - def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... + def create(timeout: Timeout | None | NotGiven = not_given): ... - get(timeout=1) # 1s timeout - get(timeout=None) # No timeout - get() # Default timeout behavior, which may not be statically known at the method definition. + create(timeout=1) # 1s timeout + create(timeout=None) # No timeout + create() # Default timeout behavior ``` """ @@ -140,13 +143,14 @@ def __repr__(self) -> str: return "NOT_GIVEN" -NotGivenOr = Union[_T, NotGiven] +not_given = NotGiven() +# for backwards compatibility: NOT_GIVEN = NotGiven() class Omit: - """In certain situations you need to be able to represent a case where a default value has - to be explicitly removed and `None` is not an appropriate substitute, for example: + """ + To explicitly omit something from being sent in a request, use `omit`. ```py # as the default `Content-Type` header is `application/json` that will be sent @@ -156,8 +160,8 @@ class Omit: # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983' client.post(..., headers={"Content-Type": "multipart/form-data"}) - # instead you can remove the default `application/json` header by passing Omit - client.post(..., headers={"Content-Type": Omit()}) + # instead you can remove the default `application/json` header by passing omit + client.post(..., headers={"Content-Type": omit}) ``` """ @@ -165,6 +169,9 @@ def __bool__(self) -> Literal[False]: return False +omit = Omit() + + @runtime_checkable class ModelBuilderProtocol(Protocol): @classmethod diff --git a/src/gradient/_utils/_transform.py b/src/gradient/_utils/_transform.py index c19124f0..52075492 100644 --- a/src/gradient/_utils/_transform.py +++ b/src/gradient/_utils/_transform.py @@ -268,7 +268,7 @@ def _transform_typeddict( annotations = get_type_hints(expected_type, include_extras=True) for key, value in data.items(): if not is_given(value): - # we don't need to include `NotGiven` values here as they'll + # we don't need to include omitted values here as they'll # be stripped out before the request is sent anyway continue @@ -434,7 +434,7 @@ async def _async_transform_typeddict( annotations = get_type_hints(expected_type, include_extras=True) for key, value in data.items(): if not is_given(value): - # we don't need to include `NotGiven` values here as they'll + # we don't need to include omitted values here as they'll # be stripped out before the request is sent anyway continue diff --git a/src/gradient/_utils/_utils.py b/src/gradient/_utils/_utils.py index f0818595..50d59269 100644 --- a/src/gradient/_utils/_utils.py +++ b/src/gradient/_utils/_utils.py @@ -21,7 +21,7 @@ import sniffio -from .._types import 
NotGiven, FileTypes, NotGivenOr, HeadersLike +from .._types import Omit, NotGiven, FileTypes, HeadersLike _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) @@ -63,7 +63,7 @@ def _extract_items( try: key = path[index] except IndexError: - if isinstance(obj, NotGiven): + if not is_given(obj): # no value was provided - we can safely ignore return [] @@ -126,8 +126,8 @@ def _extract_items( return [] -def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]: - return not isinstance(obj, NotGiven) +def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]: + return not isinstance(obj, NotGiven) and not isinstance(obj, Omit) # Type safe methods for narrowing types with TypeVars. diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py index 590b9a76..a4d32fca 100644 --- a/src/gradient/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -21,7 +21,7 @@ agent_update_status_params, agent_retrieve_usage_params, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from .api_keys import ( APIKeysResource, @@ -180,22 +180,22 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: def create( self, *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + anthropic_key_uuid: str | Omit = omit, + description: str | Omit = omit, + instruction: str | Omit = omit, + knowledge_base_uuid: SequenceNotStr[str] | Omit = omit, + model_uuid: str | Omit = omit, + name: str | Omit = omit, + openai_key_uuid: str | Omit = omit, + project_id: str | Omit = omit, + region: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. @@ -268,7 +268,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. 
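A minimal sketch of how the two sentinels behave from the caller's side, based on the `_types.py` docstrings above; `Client` and `omit` are re-exported from the package root per the `__init__.py` hunk, the agent UUID is a placeholder, and credentials are assumed to come from the environment.

```py
# Sketch only: `Client` and `omit` come from the package root per the
# `__init__.py` hunk; the UUID is a placeholder value.
from gradient import Client, omit

client = Client()

client.agents.retrieve("agent-uuid")                 # default timeout (not_given)
client.agents.retrieve("agent-uuid", timeout=None)   # None is meaningful: no timeout
client.agents.retrieve("agent-uuid", timeout=30.0)   # explicit 30s timeout

# `omit` removes a default header entirely, rather than sending it as None.
client.agents.list(extra_headers={"X-Custom-Header": omit})
```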
@@ -300,29 +300,29 @@ def update( self, path_uuid: str, *, - agent_log_insights_enabled: bool | NotGiven = NOT_GIVEN, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, + agent_log_insights_enabled: bool | Omit = omit, + anthropic_key_uuid: str | Omit = omit, + conversation_logs_enabled: bool | Omit = omit, + description: str | Omit = omit, + instruction: str | Omit = omit, + k: int | Omit = omit, + max_tokens: int | Omit = omit, + model_uuid: str | Omit = omit, + name: str | Omit = omit, + openai_key_uuid: str | Omit = omit, + project_id: str | Omit = omit, + provide_citations: bool | Omit = omit, + retrieval_method: APIRetrievalMethod | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + temperature: float | Omit = omit, + top_p: float | Omit = omit, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. @@ -418,15 +418,15 @@ def update( def list( self, *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + only_deployed: bool | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -476,7 +476,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -506,14 +506,14 @@ def retrieve_usage( self, uuid: str, *, - start: str | NotGiven = NOT_GIVEN, - stop: str | NotGiven = NOT_GIVEN, + start: str | Omit = omit, + stop: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentRetrieveUsageResponse: """ To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`. @@ -559,14 +559,14 @@ def update_status( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, + visibility: APIDeploymentVisibility | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private. @@ -676,22 +676,22 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: async def create( self, *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + anthropic_key_uuid: str | Omit = omit, + description: str | Omit = omit, + instruction: str | Omit = omit, + knowledge_base_uuid: SequenceNotStr[str] | Omit = omit, + model_uuid: str | Omit = omit, + name: str | Omit = omit, + openai_key_uuid: str | Omit = omit, + project_id: str | Omit = omit, + region: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. @@ -764,7 +764,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. 
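The async resource mirrors the sync one, and the `Omit`-defaulted body parameters mean a partial update only serializes the fields you pass; a hedged sketch follows, where `AsyncClient` as the exported async class is an assumption.

```py
# Hedged sketch: keywords left at `omit` are stripped before the request is
# sent, so this PUT body carries only `name` and `temperature`. `AsyncClient`
# is assumed to be the async export; credentials come from the environment.
import asyncio

from gradient import AsyncClient


async def main() -> None:
    client = AsyncClient()
    await client.agents.update(
        "agent-uuid",  # path_uuid placeholder
        name="my-agent",
        temperature=0.2,
    )


asyncio.run(main())
```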
@@ -796,29 +796,29 @@ async def update( self, path_uuid: str, *, - agent_log_insights_enabled: bool | NotGiven = NOT_GIVEN, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, + agent_log_insights_enabled: bool | Omit = omit, + anthropic_key_uuid: str | Omit = omit, + conversation_logs_enabled: bool | Omit = omit, + description: str | Omit = omit, + instruction: str | Omit = omit, + k: int | Omit = omit, + max_tokens: int | Omit = omit, + model_uuid: str | Omit = omit, + name: str | Omit = omit, + openai_key_uuid: str | Omit = omit, + project_id: str | Omit = omit, + provide_citations: bool | Omit = omit, + retrieval_method: APIRetrievalMethod | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + temperature: float | Omit = omit, + top_p: float | Omit = omit, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. @@ -914,15 +914,15 @@ async def update( async def list( self, *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + only_deployed: bool | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -972,7 +972,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -1002,14 +1002,14 @@ async def retrieve_usage( self, uuid: str, *, - start: str | NotGiven = NOT_GIVEN, - stop: str | NotGiven = NOT_GIVEN, + start: str | Omit = omit, + stop: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentRetrieveUsageResponse: """ To get agent usage, send a GET request to `/v2/gen-ai/agents/{uuid}/usage`. @@ -1055,14 +1055,14 @@ async def update_status( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, + visibility: APIDeploymentVisibility | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private. diff --git a/src/gradient/resources/agents/api_keys.py b/src/gradient/resources/agents/api_keys.py index 7e9feb51..174ebf60 100644 --- a/src/gradient/resources/agents/api_keys.py +++ b/src/gradient/resources/agents/api_keys.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -49,14 +49,14 @@ def create( self, path_agent_uuid: str, *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyCreateResponse: """ To create an agent API key, send a POST request to @@ -99,15 +99,15 @@ def update( path_api_key_uuid: str, *, path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateResponse: """ To update an agent API key, send a PUT request to @@ -154,14 +154,14 @@ def list( self, agent_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyListResponse: """ To list all agent API keys, send a GET request to @@ -212,7 +212,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyDeleteResponse: """ To delete an API key for an agent, send a DELETE request to @@ -251,7 +251,7 @@ def regenerate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyRegenerateResponse: """ To regenerate an agent API key, send a PUT request to @@ -305,14 +305,14 @@ async def create( self, path_agent_uuid: str, *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyCreateResponse: """ To create an agent API key, send a POST request to @@ -355,15 +355,15 @@ async def update( path_api_key_uuid: str, *, path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateResponse: """ To update an agent API key, send a PUT request to @@ -410,14 +410,14 @@ async def list( self, agent_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyListResponse: """ To list all agent API keys, send a GET request to @@ -468,7 +468,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyDeleteResponse: """ To delete an API key for an agent, send a DELETE request to @@ -507,7 +507,7 @@ async def regenerate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyRegenerateResponse: """ To regenerate an agent API key, send a PUT request to diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py index 0d134389..53c13f21 100644 --- a/src/gradient/resources/agents/chat/completions.py +++ b/src/gradient/resources/agents/chat/completions.py @@ -7,7 +7,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,29 +52,29 @@ def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + 
frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse: """ Creates a model response for the given chat conversation. @@ -181,28 +181,28 @@ def create( messages: Iterable[completion_create_params.Message], model: str, stream: Literal[True], - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. @@ -309,28 +309,28 @@ def create( messages: Iterable[completion_create_params.Message], model: str, stream: bool, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
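The overloads above resolve the return type from `stream`: left at `omit` (or `False`) the method returns a `CompletionCreateResponse`, while `stream=True` returns a `Stream[ChatCompletionChunk]`. A sketch of both call shapes; the resource path, model name, and message dict are illustrative assumptions.

```py
# Sketch of the two overloads: the return type follows the `stream` argument.
# Resource path, model name, and message shape are assumptions.
from gradient import Client

client = Client()

# Non-streaming: a single CompletionCreateResponse.
response = client.agents.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "Hello"}],
)

# Streaming: a Stream[ChatCompletionChunk]; unset sampling params stay `omit`.
stream = client.agents.chat.completions.create(
    model="example-model",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for chunk in stream:
    pass  # consume ChatCompletionChunk objects as they arrive
```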
@@ -436,29 +436,29 @@ def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( "/chat/completions?agent=true" @@ -525,29 +525,29 @@ async def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse: """ Creates a model response for the given chat conversation. 
@@ -654,28 +654,28 @@ async def create( messages: Iterable[completion_create_params.Message], model: str, stream: Literal[True], - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
@@ -782,28 +782,28 @@ async def create( messages: Iterable[completion_create_params.Message], model: str, stream: bool, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
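The async variant is symmetric: `stream=True` yields an `AsyncStream[ChatCompletionChunk]` consumed with `async for`. Same illustrative assumptions as the sync sketch above.

```py
# Async counterpart: `stream=True` returns an AsyncStream[ChatCompletionChunk].
# `AsyncClient`, the resource path, and the model name are assumptions.
import asyncio

from gradient import AsyncClient


async def main() -> None:
    client = AsyncClient()
    stream = await client.agents.chat.completions.create(
        model="example-model",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    async for chunk in stream:
        pass  # each chunk is a ChatCompletionChunk


asyncio.run(main())
```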
@@ -909,29 +909,29 @@ async def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions?agent=true" diff --git a/src/gradient/resources/agents/evaluation_datasets.py b/src/gradient/resources/agents/evaluation_datasets.py index d8e960de..0f9631ba 100644 --- a/src/gradient/resources/agents/evaluation_datasets.py +++ b/src/gradient/resources/agents/evaluation_datasets.py @@ -6,7 +6,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -53,14 +53,14 @@ def with_streaming_response(self) -> EvaluationDatasetsResourceWithStreamingResp def create( self, *, - file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + file_upload_dataset: APIFileUploadDataSourceParam | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationDatasetCreateResponse: """ To create an evaluation dataset, send a POST request to @@ -99,13 +99,13 @@ def create( def create_file_upload_presigned_urls( self, *, - files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: """ To create presigned URLs for evaluation dataset file upload, send a POST request @@ -160,14 +160,14 @@ def with_streaming_response(self) -> AsyncEvaluationDatasetsResourceWithStreamin async def create( self, *, - file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + file_upload_dataset: APIFileUploadDataSourceParam | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationDatasetCreateResponse: """ To create an evaluation dataset, send a POST request to @@ -206,13 +206,13 @@ async def create( async def create_file_upload_presigned_urls( self, *, - files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: """ To create presigned URLs for evaluation dataset file upload, send a POST request diff --git a/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py index 6111bf6f..e015bf5c 100644 --- a/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py +++ b/src/gradient/resources/agents/evaluation_metrics/anthropic/keys.py @@ -4,7 +4,7 @@ import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -54,14 +54,14 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To create an Anthropic API key, send a POST request to @@ -106,7 +106,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to @@ -137,15 +137,15 @@ def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update an Anthropic API key, send a PUT request to @@ -189,14 +189,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all Anthropic API keys, send a GET request to @@ -244,7 +244,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to @@ -275,14 +275,14 @@ def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListAgentsResponse: """ List Agents by Anthropic Key. @@ -346,14 +346,14 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: async def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To create an Anthropic API key, send a POST request to @@ -398,7 +398,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to @@ -429,15 +429,15 @@ async def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update an Anthropic API key, send a PUT request to @@ -481,14 +481,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all Anthropic API keys, send a GET request to @@ -536,7 +536,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to @@ -567,14 +567,14 @@ async def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListAgentsResponse: """ List Agents by Anthropic Key. 
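For the key-listing endpoints above, `page` and `per_page` default to `omit`, so the server's paging defaults apply unless you set them; a brief sketch, with the resource path assumed from the module layout (`resources/agents/evaluation_metrics/anthropic/keys.py`).

```py
# `page`/`per_page` left at `omit` are dropped from the query string entirely.
# The resource path below is an assumption derived from the module layout.
from gradient import Client

client = Client()

keys = client.agents.evaluation_metrics.anthropic.keys.list()  # server defaults
second_page = client.agents.evaluation_metrics.anthropic.keys.list(page=2, per_page=50)
```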
diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py index b9e1386b..43c1aa9b 100644 --- a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -12,7 +12,7 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -110,7 +110,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationMetricListResponse: """ To list all evaluation metrics, send a GET request to @@ -129,14 +129,14 @@ def list( def list_regions( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + serves_batch: bool | Omit = omit, + serves_inference: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationMetricListRegionsResponse: """ To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. @@ -223,7 +223,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationMetricListResponse: """ To list all evaluation metrics, send a GET request to @@ -242,14 +242,14 @@ async def list( async def list_regions( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + serves_batch: bool | Omit = omit, + serves_inference: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationMetricListRegionsResponse: """ To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. 
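`_types.py` itself is not part of this excerpt, so the following is only a sketch of how such a sentinel pair is commonly defined, with names matching the imports in the hunks above but all details assumed:

    from typing import Literal

    class Omit:
        """Sentinel type: drop this key from the outgoing request."""
        def __bool__(self) -> Literal[False]:
            return False

    class NotGiven:
        """Sentinel type: the caller supplied nothing; use defaults."""
        def __bool__(self) -> Literal[False]:
            return False

    omit = Omit()
    not_given = NotGiven()

    # Both instances are falsy, so defaulted arguments short-circuit
    # truthiness checks; identity tests (`value is omit`) distinguish
    # them from legitimate falsy inputs such as 0, "" or False.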
diff --git a/src/gradient/resources/agents/evaluation_metrics/models.py b/src/gradient/resources/agents/evaluation_metrics/models.py index 1902a4f0..7728e662 100644 --- a/src/gradient/resources/agents/evaluation_metrics/models.py +++ b/src/gradient/resources/agents/evaluation_metrics/models.py @@ -7,7 +7,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -47,9 +47,9 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + public_only: bool | Omit = omit, usecases: List[ Literal[ "MODEL_USECASE_UNKNOWN", @@ -61,13 +61,13 @@ def list( "MODEL_USECASE_SERVERLESS", ] ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. @@ -144,9 +144,9 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + public_only: bool | Omit = omit, usecases: List[ Literal[ "MODEL_USECASE_UNKNOWN", @@ -158,13 +158,13 @@ async def list( "MODEL_USECASE_SERVERLESS", ] ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. 
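In the `models.list()` hunks every filter, including the `usecases` literal list, now defaults to `omit`, so the request builder has to strip those placeholders before serializing the query. The SDK's real plumbing (`maybe_transform` and the base client) is outside this excerpt; a hypothetical `drop_omitted` helper illustrates the reduction:

    from typing import Any, Dict

    omit = object()  # stand-in for the SDK's Omit sentinel

    def drop_omitted(params: Dict[str, Any]) -> Dict[str, Any]:
        # Identity comparison keeps real falsy values like 0 or False
        # while removing everything the caller never provided.
        return {k: v for k, v in params.items() if v is not omit}

    # Building the query for models.list(page=2):
    query = drop_omitted(
        {"page": 2, "per_page": omit, "public_only": omit, "usecases": omit}
    )
    assert query == {"page": 2}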
diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py index caa7d0d3..256040ba 100644 --- a/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py +++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/dropbox.py @@ -4,7 +4,7 @@ import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -44,14 +44,14 @@ def with_streaming_response(self) -> DropboxResourceWithStreamingResponse: def create_tokens( self, *, - code: str | NotGiven = NOT_GIVEN, - redirect_url: str | NotGiven = NOT_GIVEN, + code: str | Omit = omit, + redirect_url: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DropboxCreateTokensResponse: """ To obtain the refresh token, needed for creation of data sources, send a GET @@ -112,14 +112,14 @@ def with_streaming_response(self) -> AsyncDropboxResourceWithStreamingResponse: async def create_tokens( self, *, - code: str | NotGiven = NOT_GIVEN, - redirect_url: str | NotGiven = NOT_GIVEN, + code: str | Omit = omit, + redirect_url: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DropboxCreateTokensResponse: """ To obtain the refresh token, needed for creation of data sources, send a GET diff --git a/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py index 8063ce5a..335e58d7 100644 --- a/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py +++ b/src/gradient/resources/agents/evaluation_metrics/oauth2/oauth2.py @@ -12,7 +12,7 @@ DropboxResourceWithStreamingResponse, AsyncDropboxResourceWithStreamingResponse, ) -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -56,14 +56,14 @@ def with_streaming_response(self) -> Oauth2ResourceWithStreamingResponse: def generate_url( self, *, - redirect_url: str | NotGiven = NOT_GIVEN, - type: str | NotGiven = NOT_GIVEN, + redirect_url: str | Omit = omit, + type: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Oauth2GenerateURLResponse: """ To generate an Oauth2-URL for use with your localhost, send a GET request to @@ -130,14 +130,14 @@ def with_streaming_response(self) -> AsyncOauth2ResourceWithStreamingResponse: async def generate_url( self, *, - redirect_url: str | NotGiven = NOT_GIVEN, - type: str | NotGiven = NOT_GIVEN, + redirect_url: str | Omit = omit, + type: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Oauth2GenerateURLResponse: """ To generate an Oauth2-URL for use with your localhost, send a GET request to diff --git a/src/gradient/resources/agents/evaluation_metrics/openai/keys.py b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py index 00131691..9ab5cbad 100644 --- a/src/gradient/resources/agents/evaluation_metrics/openai/keys.py +++ b/src/gradient/resources/agents/evaluation_metrics/openai/keys.py @@ -4,7 +4,7 @@ import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ....._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -54,14 +54,14 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. @@ -105,7 +105,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to @@ -136,15 +136,15 @@ def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update an OpenAI API key, send a PUT request to @@ -188,14 +188,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. @@ -242,7 +242,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to @@ -273,14 +273,14 @@ def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListAgentsResponse: """ List Agents by OpenAI Key. @@ -344,14 +344,14 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: async def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. 
@@ -395,7 +395,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to @@ -426,15 +426,15 @@ async def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update an OpenAI API key, send a PUT request to @@ -478,14 +478,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. @@ -532,7 +532,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to @@ -563,14 +563,14 @@ async def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListAgentsResponse: """ List Agents by OpenAI Key. 
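Beyond the rename, `Omit` tightens the public contract: it is not `Optional`, so a static type checker now separates "leave the parameter out" from "send null". A short sketch against the OpenAI key resource above, continuing the client set up in the earlier sketch and again assuming the `client.agents.evaluation_metrics.openai.keys` wiring:

    keys = client.agents.evaluation_metrics.openai.keys

    keys.list(page=2, per_page=50)   # explicit pagination
    keys.list(per_page=omit)         # identical to keys.list()
    # keys.list(page=None)           # flagged by mypy/pyright: the
    #                                # parameter is `int | Omit`, not Optional[int]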
diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py index 408396b1..7f9a766a 100644 --- a/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/agents.py @@ -4,7 +4,7 @@ import httpx -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -46,15 +46,15 @@ def list( self, workspace_uuid: str, *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + only_deployed: bool | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentListResponse: """ To list all agents by a Workspace, send a GET request to @@ -102,14 +102,14 @@ def move( self, path_workspace_uuid: str, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - body_workspace_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + body_workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentMoveResponse: """ To move all listed agents a given workspace, send a PUT request to @@ -174,15 +174,15 @@ async def list( self, workspace_uuid: str, *, - only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + only_deployed: bool | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentListResponse: """ To list all agents by a Workspace, send a GET request to @@ -230,14 +230,14 @@ async def move( self, path_workspace_uuid: str, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - body_workspace_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + body_workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AgentMoveResponse: """ To move all listed agents a given workspace, send a PUT request to diff --git a/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py index e6f610ef..73539bbd 100644 --- a/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradient/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -12,7 +12,7 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) -from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ....._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ....._utils import maybe_transform, async_maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource @@ -63,15 +63,15 @@ def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse: def create( self, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + description: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceCreateResponse: """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`. 
@@ -120,7 +120,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceRetrieveResponse: """ To retrieve details of a workspace, GET request to @@ -152,15 +152,15 @@ def update( self, path_workspace_uuid: str, *, - description: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - body_workspace_uuid: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + name: str | Omit = omit, + body_workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceUpdateResponse: """ To update a workspace, send a PUT request to @@ -212,7 +212,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceListResponse: """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`.""" return self._get( @@ -234,7 +234,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceDeleteResponse: """ To delete a workspace, send a DELETE request to @@ -270,7 +270,7 @@ def list_evaluation_test_cases( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceListEvaluationTestCasesResponse: """ To list all evaluation test cases by a workspace, send a GET request to @@ -325,15 +325,15 @@ def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingRespons async def create( self, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + description: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceCreateResponse: """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`. 
@@ -382,7 +382,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceRetrieveResponse: """ To retrieve details of a workspace, GET request to @@ -414,15 +414,15 @@ async def update( self, path_workspace_uuid: str, *, - description: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - body_workspace_uuid: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + name: str | Omit = omit, + body_workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceUpdateResponse: """ To update a workspace, send a PUT request to @@ -474,7 +474,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceListResponse: """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`.""" return await self._get( @@ -496,7 +496,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceDeleteResponse: """ To delete a workspace, send a DELETE request to @@ -532,7 +532,7 @@ async def list_evaluation_test_cases( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> WorkspaceListEvaluationTestCasesResponse: """ To list all evaluation test cases by a workspace, send a GET request to diff --git a/src/gradient/resources/agents/evaluation_runs.py b/src/gradient/resources/agents/evaluation_runs.py index e00c9eb3..8506b00f 100644 --- a/src/gradient/resources/agents/evaluation_runs.py +++ b/src/gradient/resources/agents/evaluation_runs.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -47,15 +47,15 @@ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse def create( self, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - run_name: str | NotGiven = NOT_GIVEN, - test_case_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + run_name: str | Omit = omit, + test_case_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunCreateResponse: """ To run an evaluation test case, send a POST request to @@ -103,7 +103,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunRetrieveResponse: """ To retrive information about an existing evaluation run, send a GET request to @@ -136,14 +136,14 @@ def list_results( self, evaluation_run_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunListResultsResponse: """ To retrieve results of an evaluation run, send a GET request to @@ -196,7 +196,7 @@ def retrieve_results( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunRetrieveResultsResponse: """ To retrieve results of an evaluation run, send a GET request to @@ -249,15 +249,15 @@ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingRes async def create( self, *, - agent_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - run_name: str | NotGiven = NOT_GIVEN, - test_case_uuid: str | NotGiven = NOT_GIVEN, + agent_uuids: SequenceNotStr[str] | Omit = omit, + run_name: str | Omit = omit, + test_case_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunCreateResponse: """ To run an evaluation test case, send a POST request to @@ -305,7 +305,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunRetrieveResponse: """ To retrive information about an existing evaluation run, send a GET request to @@ -338,14 +338,14 @@ async def list_results( self, evaluation_run_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunListResultsResponse: """ To retrieve results of an evaluation run, send a GET request to @@ -398,7 +398,7 @@ async def retrieve_results( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationRunRetrieveResultsResponse: """ To retrieve results of an evaluation run, send a GET request to diff --git a/src/gradient/resources/agents/evaluation_test_cases.py b/src/gradient/resources/agents/evaluation_test_cases.py index 07f0a251..d53b8c26 100644 --- a/src/gradient/resources/agents/evaluation_test_cases.py +++ b/src/gradient/resources/agents/evaluation_test_cases.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -56,18 +56,18 @@ def with_streaming_response(self) -> EvaluationTestCasesResourceWithStreamingRes def create( self, *, - dataset_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - metrics: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, - workspace_uuid: str | NotGiven = NOT_GIVEN, + dataset_uuid: str | Omit = omit, + description: str | Omit = omit, + metrics: SequenceNotStr[str] | Omit = omit, + name: str | Omit = omit, + star_metric: APIStarMetricParam | Omit = omit, + workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseCreateResponse: """ To create an evaluation test-case send a POST request to @@ -117,13 +117,13 @@ def retrieve( self, test_case_uuid: str, *, - evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + evaluation_test_case_version: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseRetrieveResponse: """ To retrive information about an existing evaluation test case, send a GET @@ -163,18 +163,18 @@ def update( self, path_test_case_uuid: str, *, - dataset_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, - body_test_case_uuid: str | NotGiven = NOT_GIVEN, + dataset_uuid: str | Omit = omit, + description: str | Omit = omit, + metrics: evaluation_test_case_update_params.Metrics | Omit = omit, + name: str | Omit = omit, + star_metric: APIStarMetricParam | Omit = omit, + body_test_case_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseUpdateResponse: """ To update an evaluation test-case send a PUT request to @@ -230,7 +230,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseListResponse: """ To list all evaluation test cases, send a GET request to @@ -250,13 +250,13 @@ def list_evaluation_runs( self, evaluation_test_case_uuid: str, *, - evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + evaluation_test_case_version: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseListEvaluationRunsResponse: """ To list all evaluation runs by test case, send a GET request to @@ -318,18 +318,18 @@ def with_streaming_response(self) -> AsyncEvaluationTestCasesResourceWithStreami async def create( self, *, - dataset_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - metrics: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, - workspace_uuid: str | NotGiven = NOT_GIVEN, + dataset_uuid: str | Omit = omit, + description: str | Omit = omit, + metrics: SequenceNotStr[str] | Omit = omit, + name: str | Omit = omit, + star_metric: APIStarMetricParam | Omit = omit, + workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseCreateResponse: """ To create an evaluation test-case send a POST request to @@ -379,13 +379,13 @@ async def retrieve( self, test_case_uuid: str, *, - evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + evaluation_test_case_version: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseRetrieveResponse: """ To retrive information about an existing evaluation test case, send a GET @@ -425,18 +425,18 @@ async def update( self, path_test_case_uuid: str, *, - dataset_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, - body_test_case_uuid: str | NotGiven = NOT_GIVEN, + dataset_uuid: str | Omit = omit, + description: str | Omit = omit, + metrics: evaluation_test_case_update_params.Metrics | Omit = omit, + name: str | Omit = omit, + star_metric: APIStarMetricParam | Omit = omit, + body_test_case_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseUpdateResponse: """ To update an evaluation test-case send a PUT request to @@ -492,7 +492,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseListResponse: """ To list all evaluation test cases, send a GET request to @@ -512,13 +512,13 @@ async def list_evaluation_runs( self, evaluation_test_case_uuid: str, *, - evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + evaluation_test_case_version: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> EvaluationTestCaseListEvaluationRunsResponse: """ To list all evaluation runs by test case, send a GET request to diff --git a/src/gradient/resources/agents/functions.py b/src/gradient/resources/agents/functions.py index 7986f750..3d995d24 100644 --- a/src/gradient/resources/agents/functions.py +++ b/src/gradient/resources/agents/functions.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -47,19 +47,19 @@ def create( self, path_agent_uuid: str, *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + description: str | Omit = omit, + faas_name: str | Omit = omit, + faas_namespace: str | Omit = omit, + function_name: str | Omit = omit, + input_schema: object | Omit = omit, + output_schema: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionCreateResponse: """ To create a function route for an agent, send a POST request to @@ -117,20 +117,20 @@ def update( path_function_uuid: str, *, path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + description: str | Omit = omit, + faas_name: str | Omit = omit, + faas_namespace: str | Omit = omit, + function_name: str | Omit = omit, + body_function_uuid: str | Omit = omit, + input_schema: object | Omit = omit, + output_schema: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionUpdateResponse: """ To update the function route, send a PUT request to @@ -198,7 +198,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionDeleteResponse: """ To delete a function route from an agent, send a DELETE request to @@ -252,19 +252,19 @@ async def create( self, path_agent_uuid: str, *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + description: str | Omit = omit, + faas_name: str | Omit = omit, + faas_namespace: str | Omit = omit, + function_name: str | Omit = omit, + input_schema: object | Omit = omit, + output_schema: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionCreateResponse: """ To create a function route for an agent, send a POST request to @@ -322,20 +322,20 @@ async def update( path_function_uuid: str, *, path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, + body_agent_uuid: str | Omit = omit, + description: str | Omit = omit, + faas_name: str | Omit = omit, + faas_namespace: str | Omit = omit, + function_name: str | Omit = omit, + body_function_uuid: str | Omit = omit, + input_schema: object | Omit = omit, + output_schema: object | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionUpdateResponse: """ To update the function route, send a PUT request to @@ -403,7 +403,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FunctionDeleteResponse: """ To delete a function route from an agent, send a DELETE request to diff --git a/src/gradient/resources/agents/knowledge_bases.py b/src/gradient/resources/agents/knowledge_bases.py index 1664ee84..deefd123 100644 --- a/src/gradient/resources/agents/knowledge_bases.py +++ b/src/gradient/resources/agents/knowledge_bases.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Query, Headers, NotGiven, not_given from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -49,7 +49,7 @@ def attach( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APILinkKnowledgeBaseOutput: """ To attach knowledge bases to an agent, send a POST request to @@ -86,7 +86,7 @@ def attach_single( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APILinkKnowledgeBaseOutput: """ To attach a knowledge base to an agent, send a POST request to @@ -127,7 +127,7 @@ def detach( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> 
KnowledgeBaseDetachResponse: """ To detach a knowledge base from an agent, send a DELETE request to @@ -188,7 +188,7 @@ async def attach( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APILinkKnowledgeBaseOutput: """ To attach knowledge bases to an agent, send a POST request to @@ -225,7 +225,7 @@ async def attach_single( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APILinkKnowledgeBaseOutput: """ To attach a knowledge base to an agent, send a POST request to @@ -266,7 +266,7 @@ async def detach( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseDetachResponse: """ To detach a knowledge base from an agent, send a DELETE request to diff --git a/src/gradient/resources/agents/routes.py b/src/gradient/resources/agents/routes.py index 1007b08f..dc37b7d2 100644 --- a/src/gradient/resources/agents/routes.py +++ b/src/gradient/resources/agents/routes.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -49,17 +49,17 @@ def update( path_child_agent_uuid: str, *, path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, + body_child_agent_uuid: str | Omit = omit, + if_case: str | Omit = omit, + body_parent_agent_uuid: str | Omit = omit, + route_name: str | Omit = omit, + uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteUpdateResponse: """ To update an agent route for an agent, send a PUT request to @@ -122,7 +122,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteDeleteResponse: """ To delete an agent route from a parent agent, send a DELETE request to @@ -156,16 +156,16 @@ def add( path_child_agent_uuid: str, *, path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, + body_child_agent_uuid: str | Omit = omit, + if_case: str | Omit = omit, + body_parent_agent_uuid: str | Omit = omit, + route_name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteAddResponse: """ To add an agent route to an agent, send a POST request to @@ -222,7 +222,7 @@ def view( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteViewResponse: """ To view agent routes for an agent, send a GET requtest to @@ -275,17 +275,17 @@ async def update( path_child_agent_uuid: str, *, path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, + body_child_agent_uuid: str | Omit = omit, + if_case: str | Omit = omit, + body_parent_agent_uuid: str | Omit = omit, + route_name: str | Omit = omit, + uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteUpdateResponse: """ To update an agent route for an agent, send a PUT request to @@ -348,7 +348,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteDeleteResponse: """ To delete an agent route from a parent agent, send a DELETE request to @@ -382,16 +382,16 @@ async def add( path_child_agent_uuid: str, *, path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, + body_child_agent_uuid: str | Omit = omit, + if_case: str | Omit = omit, + body_parent_agent_uuid: str | Omit = omit, + route_name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteAddResponse: """ To add an agent route to an agent, send a POST request to @@ -448,7 +448,7 @@ async def view( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RouteViewResponse: """ To view agent routes for an agent, send a GET request to diff --git a/src/gradient/resources/agents/versions.py b/src/gradient/resources/agents/versions.py index bc56e032..0331344a 100644 --- a/src/gradient/resources/agents/versions.py +++ b/src/gradient/resources/agents/versions.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -46,14 +46,14 @@ def update( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, - version_hash: str | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, + version_hash: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VersionUpdateResponse: """ To update to a specific agent version, send a PUT request to @@ -95,14 +95,14 @@ def list( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VersionListResponse: """ To list all agent versions, send a GET request to @@ -168,14 +168,14 @@ async def update( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, - version_hash: str | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, + version_hash: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VersionUpdateResponse: """ To update to a specific agent version, send a PUT request to @@ -217,14 +217,14 @@ async def list( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VersionListResponse: """ To list all agent versions, send a GET request to diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py index 80c3d550..e2292bcb 100644 --- a/src/gradient/resources/chat/completions.py +++ b/src/gradient/resources/chat/completions.py @@ -7,7 +7,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -52,29 +52,29 @@ def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse: """ Creates a model response for the given chat conversation. 
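The hunks above swap the request-parameter default from the `NOT_GIVEN` constant to the lowercase `omit` sentinel, while `timeout` keeps the `NotGiven` type with a `not_given` default. The sketch below illustrates why two distinct sentinels are useful; the class bodies are simplified stand-ins for illustration, not the SDK's actual `_types` definitions:

    # Simplified stand-ins for the sentinels imported from ..._types above.
    class Omit:
        """Marks a request parameter as 'leave this key out of the body entirely'."""
        def __bool__(self) -> bool:
            return False

    class NotGiven:
        """Marks a client option such as `timeout` as 'fall back to the client default'."""
        def __bool__(self) -> bool:
            return False

    omit = Omit()
    not_given = NotGiven()

    def build_body(**params: object) -> dict:
        # `None` is kept (it serializes to JSON null); `omit` drops the key,
        # which is different from sending an explicit null.
        return {k: v for k, v in params.items() if not isinstance(v, Omit)}

    assert build_body(n=None, temperature=omit) == {"n": None}

Making both sentinels falsy lets call sites write `if timeout:` without special-casing the defaults.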
@@ -181,28 +181,28 @@ def create( messages: Iterable[completion_create_params.Message], model: str, stream: Literal[True], - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
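This overload returns `Stream[ChatCompletionChunk]` when `stream=True` is passed literally. A hypothetical call site follows; the `Gradient` client class name and the model id are assumptions for illustration, not taken from this patch:

    from gradient import Gradient  # assumed client entry point

    client = Gradient()  # assumed to read credentials from the environment

    stream = client.chat.completions.create(
        model="llama3.3-70b-instruct",  # placeholder model id
        messages=[{"role": "user", "content": "Stream a short greeting."}],
        stream=True,  # Literal[True] selects the Stream[ChatCompletionChunk] overload
    )
    for chunk in stream:
        print(chunk)  # each chunk carries an incremental delta, not a full message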
@@ -309,28 +309,28 @@ def create( messages: Iterable[completion_create_params.Message], model: str, stream: bool, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
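The repeated `create` definitions above are `typing.overload` stubs over a single runtime implementation: `Literal[True]` maps to a stream, `Literal[False]` or an omitted `stream` maps to a plain response, and a runtime `bool` maps to the union. A distilled, runnable sketch of the same dispatch, with toy `Response`/`Stream` classes rather than the SDK's:

    from typing import Literal, Union, overload

    class Response: ...
    class Stream: ...

    @overload
    def create(*, stream: Literal[True]) -> Stream: ...
    @overload
    def create(*, stream: Literal[False] = ...) -> Response: ...
    @overload
    def create(*, stream: bool) -> Union[Response, Stream]: ...
    def create(*, stream: bool = False) -> Union[Response, Stream]:
        # Single runtime body; type checkers narrow the return type per call site.
        return Stream() if stream else Response()

    streamed = create(stream=True)  # checkers infer Stream
    plain = create()                # checkers infer Response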
@@ -436,29 +436,29 @@ def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: return self._post( "/chat/completions" @@ -525,29 +525,29 @@ async def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse: """ Creates a model response for the given chat conversation. 
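Every method signature in this patch repeats the same escape hatch: `extra_headers`, `extra_query`, and `extra_body` forward values the typed parameters don't cover, taking precedence over client-level settings. A hypothetical use, continuing the assumed `client` from the sketch above:

    # Hypothetical: forwarding a field the typed signature doesn't expose yet.
    completion = client.chat.completions.create(
        model="llama3.3-70b-instruct",  # placeholder model id
        messages=[{"role": "user", "content": "Hi"}],
        extra_headers={"X-Request-Source": "docs-example"},  # merged into request headers
        extra_body={"experimental_flag": True},  # merged into the JSON request body
    )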
@@ -654,28 +654,28 @@ async def create( messages: Iterable[completion_create_params.Message], model: str, stream: Literal[True], - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
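The async counterpart returns `AsyncStream[ChatCompletionChunk]`, consumed with `async for` rather than a plain loop. A hypothetical sketch, again assuming an `AsyncGradient` client class that mirrors the sync one:

    import asyncio

    from gradient import AsyncGradient  # assumed async client entry point

    async def stream_demo() -> None:
        client = AsyncGradient()
        stream = await client.chat.completions.create(
            model="llama3.3-70b-instruct",  # placeholder model id
            messages=[{"role": "user", "content": "Stream a short greeting."}],
            stream=True,  # the awaited call yields AsyncStream[ChatCompletionChunk]
        )
        async for chunk in stream:  # AsyncStream implements __aiter__
            print(chunk)

    asyncio.run(stream_demo())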
@@ -782,28 +782,28 @@ async def create( messages: Iterable[completion_create_params.Message], model: str, stream: bool, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. 
@@ -909,29 +909,29 @@ async def create( *, messages: Iterable[completion_create_params.Message], model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | Omit = omit, + logit_bias: Optional[Dict[str, int]] | Omit = omit, + logprobs: Optional[bool] | Omit = omit, + max_completion_tokens: Optional[int] | Omit = omit, + max_tokens: Optional[int] | Omit = omit, + metadata: Optional[Dict[str, str]] | Omit = omit, + n: Optional[int] | Omit = omit, + presence_penalty: Optional[float] | Omit = omit, + stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + stream_options: Optional[completion_create_params.StreamOptions] | Omit = omit, + temperature: Optional[float] | Omit = omit, + tool_choice: completion_create_params.ToolChoice | Omit = omit, + tools: Iterable[completion_create_params.Tool] | Omit = omit, + top_logprobs: Optional[int] | Omit = omit, + top_p: Optional[float] | Omit = omit, + user: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions" diff --git a/src/gradient/resources/databases/schema_registry/config.py b/src/gradient/resources/databases/schema_registry/config.py index f9c0d8d0..e012dd77 100644 --- a/src/gradient/resources/databases/schema_registry/config.py +++ b/src/gradient/resources/databases/schema_registry/config.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Query, Headers, NotGiven, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -55,7 +55,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigRetrieveResponse: """ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET @@ -98,7 +98,7 @@ def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigUpdateResponse: """ To update the Schema Registry configuration for a Kafka cluster, send a PUT @@ -142,7 +142,7 @@ def retrieve_subject( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigRetrieveSubjectResponse: """ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, @@ -189,7 +189,7 @@ def update_subject( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigUpdateSubjectResponse: """ To update the Schema Registry configuration for a Subject of a Kafka cluster, @@ -258,7 +258,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigRetrieveResponse: """ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET @@ -301,7 +301,7 @@ async def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigUpdateResponse: """ To update the Schema Registry configuration for a Kafka cluster, send a PUT @@ -347,7 +347,7 @@ async def retrieve_subject( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> 
ConfigRetrieveSubjectResponse: """ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster, @@ -394,7 +394,7 @@ async def update_subject( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ConfigUpdateSubjectResponse: """ To update the Schema Registry configuration for a Subject of a Kafka cluster, diff --git a/src/gradient/resources/gpu_droplets/account/keys.py b/src/gradient/resources/gpu_droplets/account/keys.py index f5cd4120..f50b9945 100644 --- a/src/gradient/resources/gpu_droplets/account/keys.py +++ b/src/gradient/resources/gpu_droplets/account/keys.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -56,7 +56,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To add a new SSH public key to your DigitalOcean account, send a POST request to @@ -102,7 +102,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` @@ -136,13 +136,13 @@ def update( self, ssh_key_identifier: Union[int, str], *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update the name of an SSH key, send a PUT request to either @@ -178,14 +178,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all of the keys in your account, send a GET request to @@ -233,7 +233,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a public SSH key that you have in your account, send a DELETE request @@ -295,7 +295,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyCreateResponse: """ To add a new SSH public key to your DigitalOcean account, send a POST request to @@ -341,7 +341,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyRetrieveResponse: """ To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` @@ -375,13 +375,13 @@ async def update( self, ssh_key_identifier: Union[int, str], *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyUpdateResponse: """ To update the name of an SSH key, send a PUT request to either @@ -417,14 +417,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KeyListResponse: """ To list all of the keys in your account, send a GET request to @@ -472,7 +472,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a public SSH key that you have in your account, send a DELETE request diff --git a/src/gradient/resources/gpu_droplets/actions.py b/src/gradient/resources/gpu_droplets/actions.py index 715fb076..a708fb67 100644 --- a/src/gradient/resources/gpu_droplets/actions.py +++ b/src/gradient/resources/gpu_droplets/actions.py @@ -7,7 +7,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -58,7 +58,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve a Droplet action, send a GET request to @@ -90,14 +90,14 @@ def list( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve a list of all actions that have been executed for a Droplet, send a @@ -161,13 +161,13 @@ def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: """Some actions can be performed in bulk on tagged Droplets. @@ -223,14 +223,14 @@ def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: """Some actions can be performed in bulk on tagged Droplets. @@ -288,14 +288,14 @@ def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: return self._post( "/v2/droplets/actions" @@ -345,7 +345,7 @@ def initiate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -406,13 +406,13 @@ def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -476,13 +476,13 @@ def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -545,13 +545,13 @@ def initiate( "enable_ipv6", "snapshot", ], - image: int | NotGiven = NOT_GIVEN, + image: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -614,14 +614,14 @@ def initiate( "enable_ipv6", "snapshot", ], - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, + disk: bool | Omit = omit, + size: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -688,13 +688,13 @@ def initiate( "enable_ipv6", "snapshot", ], - image: Union[str, int] | NotGiven = NOT_GIVEN, + image: Union[str, int] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -758,13 +758,13 @@ def initiate( "enable_ipv6", "snapshot", ], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -827,13 +827,13 @@ def initiate( "enable_ipv6", "snapshot", ], - kernel: int | NotGiven = NOT_GIVEN, + kernel: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -896,13 +896,13 @@ def initiate( "enable_ipv6", "snapshot", ], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -965,18 +965,18 @@ def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - image: int | Union[str, int] | NotGiven = NOT_GIVEN, - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - kernel: int | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + image: int | Union[str, int] | Omit = omit, + disk: bool | Omit = omit, + size: str | Omit = omit, + name: str | Omit = omit, + kernel: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: return self._post( f"/v2/droplets/{droplet_id}/actions" @@ -1031,7 +1031,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve a Droplet action, send a GET request to @@ -1063,14 +1063,14 @@ async def list( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve a list of all actions that have been executed for a Droplet, send a @@ -1134,13 +1134,13 @@ async def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: """Some actions can be performed in bulk on tagged Droplets. 
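`bulk_initiate` applies a single action type to every Droplet carrying a tag, which is why `tag_name` is optional while the action `type` is a required literal. A hypothetical call; the `client.gpu_droplets.actions` accessor is inferred from this file's path and is an assumption:

    # Hypothetical: snapshot every Droplet tagged "prod-workers" in one request.
    actions = client.gpu_droplets.actions.bulk_initiate(
        type="snapshot",          # one of the Literal action types in the signature
        tag_name="prod-workers",  # assumed example tag
        name="nightly-snapshot",  # snapshot name; the overload accepting `name` applies
    )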
@@ -1196,14 +1196,14 @@ async def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: """Some actions can be performed in bulk on tagged Droplets. @@ -1261,14 +1261,14 @@ async def bulk_initiate( "enable_ipv6", "snapshot", ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + tag_name: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionBulkInitiateResponse: return await self._post( "/v2/droplets/actions" @@ -1320,7 +1320,7 @@ async def initiate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1381,13 +1381,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1451,13 +1451,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1520,13 +1520,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - image: int | NotGiven = NOT_GIVEN, + image: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1589,14 +1589,14 @@ async def initiate( "enable_ipv6", "snapshot", ], - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, + disk: bool | Omit = omit, + size: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1663,13 +1663,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - image: Union[str, int] | NotGiven = NOT_GIVEN, + image: Union[str, int] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1733,13 +1733,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1802,13 +1802,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - kernel: int | NotGiven = NOT_GIVEN, + kernel: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1871,13 +1871,13 @@ async def initiate( "enable_ipv6", "snapshot", ], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: """ To initiate an action on a Droplet send a POST request to @@ -1940,18 +1940,18 @@ async def initiate( "enable_ipv6", "snapshot", ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - image: int | Union[str, int] | NotGiven = NOT_GIVEN, - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - kernel: int | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + image: int | Union[str, int] | Omit = omit, + disk: bool | Omit = omit, + size: str | Omit = omit, + name: str | Omit = omit, + kernel: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateResponse: return await self._post( f"/v2/droplets/{droplet_id}/actions" diff --git a/src/gradient/resources/gpu_droplets/autoscale.py b/src/gradient/resources/gpu_droplets/autoscale.py index 342256f6..8df17f7a 100644 --- a/src/gradient/resources/gpu_droplets/autoscale.py +++ b/src/gradient/resources/gpu_droplets/autoscale.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -64,7 +64,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleCreateResponse: """ To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` @@ -114,7 +114,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleRetrieveResponse: """ To show information about an individual autoscale pool, send a GET request to @@ -153,7 +153,7 @@ def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleUpdateResponse: """ To update the configuration of an existing autoscale pool, send a PUT request to @@ -197,15 +197,15 @@ def update( def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
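# A hedged sketch of what the `timeout: float | httpx.Timeout | None | NotGiven
# = not_given` annotation above implies for callers: `not_given` defers to the
# client-wide default, a float or `httpx.Timeout` overrides it for this call
# only, and an explicit `None` waits indefinitely. The `Gradient` client name
# and its `timeout=` constructor argument are assumptions inferred from this
# diff's package layout, not confirmed API.
import httpx

from gradient import Gradient  # assumed entry point

client = Gradient(timeout=60.0)  # assumed client-wide default

client.gpu_droplets.autoscale.list()             # inherits the 60s default
client.gpu_droplets.autoscale.list(timeout=5.0)  # 5s, for this call only
client.gpu_droplets.autoscale.list(
    timeout=httpx.Timeout(10.0, connect=2.0)     # fine-grained override
)
client.gpu_droplets.autoscale.list(timeout=None)  # disable the timeout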
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListResponse: """ To list all autoscale pools in your team, send a GET request to @@ -258,7 +258,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy an autoscale pool, send a DELETE request to the @@ -298,7 +298,7 @@ def delete_dangerous( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy an autoscale pool and its associated resources (Droplets), send a @@ -332,14 +332,14 @@ def list_history( self, autoscale_pool_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListHistoryResponse: """ To list all of the scaling history events of an autoscale pool, send a GET @@ -387,14 +387,14 @@ def list_members( self, autoscale_pool_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListMembersResponse: """ To list the Droplets in an autoscale pool, send a GET request to @@ -471,7 +471,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleCreateResponse: """ To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` @@ -521,7 +521,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleRetrieveResponse: """ To show information about an individual autoscale pool, send a GET request to @@ -560,7 +560,7 @@ async def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleUpdateResponse: """ To update the configuration of an existing autoscale pool, send a PUT request to @@ -604,15 +604,15 @@ async def update( async def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListResponse: """ To list all autoscale pools in your team, send a GET request to @@ -665,7 +665,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy an autoscale pool, send a DELETE request to the @@ -705,7 +705,7 @@ async def delete_dangerous( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy an autoscale pool and its associated resources (Droplets), send a @@ -739,14 +739,14 @@ async def list_history( self, autoscale_pool_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListHistoryResponse: """ To list all of the scaling history events of an autoscale pool, send a GET @@ -794,14 +794,14 @@ async def list_members( self, autoscale_pool_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AutoscaleListMembersResponse: """ To list the Droplets in an autoscale pool, send a GET request to diff --git a/src/gradient/resources/gpu_droplets/backups.py b/src/gradient/resources/gpu_droplets/backups.py index 9f20a047..065aa3d1 100644 --- a/src/gradient/resources/gpu_droplets/backups.py +++ b/src/gradient/resources/gpu_droplets/backups.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -48,14 +48,14 @@ def list( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListResponse: """ To retrieve any backups associated with a Droplet, send a GET request to @@ -101,14 +101,14 @@ def list( def list_policies( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListPoliciesResponse: """ To list information about the backup policies for all Droplets in the account, @@ -155,7 +155,7 @@ def list_supported_policies( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListSupportedPoliciesResponse: """ To retrieve a list of all supported Droplet backup policies, send a GET request @@ -180,7 +180,7 @@ def retrieve_policy( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupRetrievePolicyResponse: """ To show information about an individual Droplet's backup policy, send a GET @@ -230,14 +230,14 @@ async def list( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListResponse: """ To retrieve any backups associated with a Droplet, send a GET request to @@ -283,14 +283,14 @@ async def list( async def list_policies( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListPoliciesResponse: """ To list information about the backup policies for all Droplets in the account, @@ -337,7 +337,7 @@ async def list_supported_policies( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupListSupportedPoliciesResponse: """ To retrieve a list of all supported Droplet backup policies, send a GET request @@ -362,7 +362,7 @@ async def retrieve_policy( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> BackupRetrievePolicyResponse: """ To show information about an individual Droplet's backup policy, send a GET diff --git a/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py index 0d55cb48..2ccad852 100644 --- a/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py +++ b/src/gradient/resources/gpu_droplets/destroy_with_associated_resources.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -55,7 +55,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DestroyWithAssociatedResourceListResponse: """ To list the associated billable resources that can be destroyed along with a @@ -97,7 +97,7 @@ def check_status( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DestroyWithAssociatedResourceCheckStatusResponse: """ To check on the status of a request to destroy a Droplet with its associated @@ -133,7 +133,7 @@ def delete_dangerous( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a Droplet along with all of its associated resources, send a DELETE @@ -172,17 +172,17 @@ def delete_selective( self, droplet_id: int, *, - floating_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - reserved_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - volume_snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + floating_ips: SequenceNotStr[str] | Omit = omit, + reserved_ips: SequenceNotStr[str] | Omit = omit, + snapshots: SequenceNotStr[str] | 
Omit = omit, + volume_snapshots: SequenceNotStr[str] | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a Droplet along with a sub-set of its associated resources, send a @@ -250,7 +250,7 @@ def retry( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ If the status of a request to destroy a Droplet with its associated resources @@ -311,7 +311,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DestroyWithAssociatedResourceListResponse: """ To list the associated billable resources that can be destroyed along with a @@ -353,7 +353,7 @@ async def check_status( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DestroyWithAssociatedResourceCheckStatusResponse: """ To check on the status of a request to destroy a Droplet with its associated @@ -389,7 +389,7 @@ async def delete_dangerous( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a Droplet along with all of its associated resources, send a DELETE @@ -428,17 +428,17 @@ async def delete_selective( self, droplet_id: int, *, - floating_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - reserved_ips: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - volume_snapshots: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + floating_ips: SequenceNotStr[str] | Omit = omit, + reserved_ips: SequenceNotStr[str] | Omit = omit, + snapshots: SequenceNotStr[str] | Omit = omit, + volume_snapshots: SequenceNotStr[str] | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To destroy a Droplet along with a sub-set of its associated resources, send a @@ -506,7 +506,7 @@ async def retry( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ If the status of a request to destroy a Droplet with its associated resources diff --git a/src/gradient/resources/gpu_droplets/firewalls/droplets.py b/src/gradient/resources/gpu_droplets/firewalls/droplets.py index b25aa3e3..90bcb47e 100644 --- a/src/gradient/resources/gpu_droplets/firewalls/droplets.py +++ b/src/gradient/resources/gpu_droplets/firewalls/droplets.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a Droplet to a firewall, send a POST request to @@ -98,7 +98,7 @@ def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a Droplet from a firewall, send a DELETE request to @@ -165,7 +165,7 @@ async def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a Droplet to a firewall, send a POST request to @@ -211,7 +211,7 @@ async def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a Droplet from a firewall, send a DELETE request to diff --git a/src/gradient/resources/gpu_droplets/firewalls/firewalls.py b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py index 116cde8d..a5fee406 100644 --- a/src/gradient/resources/gpu_droplets/firewalls/firewalls.py +++ b/src/gradient/resources/gpu_droplets/firewalls/firewalls.py @@ -28,7 +28,7 @@ DropletsResourceWithStreamingResponse, AsyncDropletsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -84,13 +84,13 @@ def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse: def create( self, *, - body: 
firewall_create_params.Body | NotGiven = NOT_GIVEN, + body: firewall_create_params.Body | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallCreateResponse: """To create a new firewall, send a POST request to `/v2/firewalls`. @@ -124,7 +124,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallRetrieveResponse: """ To show information about an existing firewall, send a GET request to @@ -161,7 +161,7 @@ def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallUpdateResponse: """ To update the configuration of an existing firewall, send a PUT request to @@ -197,14 +197,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallListResponse: """ To list all of the firewalls available on your account, send a GET request to @@ -250,7 +250,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. @@ -317,13 +317,13 @@ def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse async def create( self, *, - body: firewall_create_params.Body | NotGiven = NOT_GIVEN, + body: firewall_create_params.Body | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallCreateResponse: """To create a new firewall, send a POST request to `/v2/firewalls`. 
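# A hedged usage sketch for the firewall signatures above: with `body`
# defaulting to `omit`, a bare `create()` sends no payload at all, and the
# `extra_*` options shown in these hunks layer per-request values over client
# defaults. The `Gradient` client name and the body fields are assumptions;
# only the resource path follows this diff's file layout.
from gradient import Gradient  # assumed entry point

client = Gradient()

firewall = client.gpu_droplets.firewalls.create(
    body={"name": "web-firewall"},                  # illustrative fields only
    extra_headers={"X-Request-Source": "example"},  # merged over client headers
)

page = client.gpu_droplets.firewalls.list(per_page=25)  # `page` stays omitted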
@@ -357,7 +357,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallRetrieveResponse: """ To show information about an existing firewall, send a GET request to @@ -394,7 +394,7 @@ async def update( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallUpdateResponse: """ To update the configuration of an existing firewall, send a PUT request to @@ -430,14 +430,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FirewallListResponse: """ To list all of the firewalls available on your account, send a GET request to @@ -483,7 +483,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. diff --git a/src/gradient/resources/gpu_droplets/firewalls/rules.py b/src/gradient/resources/gpu_droplets/firewalls/rules.py index d3a77cd9..f669fc6d 100644 --- a/src/gradient/resources/gpu_droplets/firewalls/rules.py +++ b/src/gradient/resources/gpu_droplets/firewalls/rules.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -46,14 +46,14 @@ def add( self, firewall_id: str, *, - inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, + inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | Omit = omit, + outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To add additional access rules to a firewall, send a POST request to @@ -98,14 +98,14 @@ def remove( self, firewall_id: str, *, - inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, + inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | Omit = omit, + outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove access rules from a firewall, send a DELETE request to @@ -171,14 +171,14 @@ async def add( self, firewall_id: str, *, - inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, + inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | Omit = omit, + outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To add additional access rules to a firewall, send a POST request to @@ -223,14 +223,14 @@ async def remove( self, firewall_id: str, *, - inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, + inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | Omit = omit, + outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove access rules from a firewall, send a DELETE request to diff --git a/src/gradient/resources/gpu_droplets/firewalls/tags.py b/src/gradient/resources/gpu_droplets/firewalls/tags.py index ee13acd5..82d613fb 100644 --- a/src/gradient/resources/gpu_droplets/firewalls/tags.py +++ b/src/gradient/resources/gpu_droplets/firewalls/tags.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ...._types import Body, Query, Headers, NoneType, NotGiven, SequenceNotStr, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a tag representing a group of Droplets to a firewall, send a POST @@ -101,7 +101,7 @@ def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a tag representing a group of Droplets from a firewall, send a DELETE @@ -171,7 +171,7 @@ async def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a tag representing a group of Droplets to a firewall, send a POST @@ -220,7 +220,7 @@ async def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a tag representing a group of Droplets from a firewall, send a DELETE diff --git a/src/gradient/resources/gpu_droplets/floating_ips/actions.py b/src/gradient/resources/gpu_droplets/floating_ips/actions.py index ecf88993..f73d5707 100644 --- a/src/gradient/resources/gpu_droplets/floating_ips/actions.py +++ b/src/gradient/resources/gpu_droplets/floating_ips/actions.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -56,7 +56,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: """ To initiate an action on a floating IP send a POST request to @@ -93,7 +93,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | 
httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: """ To initiate an action on a floating IP send a POST request to @@ -126,13 +126,13 @@ def create( floating_ip: str, *, type: Literal["assign", "unassign"], - droplet_id: int | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: if not floating_ip: raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") @@ -163,7 +163,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve the status of a floating IP action, send a GET request to @@ -199,7 +199,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on a floating IP, send a GET @@ -258,7 +258,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: """ To initiate an action on a floating IP send a POST request to @@ -295,7 +295,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: """ To initiate an action on a floating IP send a POST request to @@ -328,13 +328,13 @@ async def create( floating_ip: str, *, type: Literal["assign", "unassign"], - droplet_id: int | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionCreateResponse: if not floating_ip: raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") @@ -365,7 +365,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve the status of a floating IP action, send a GET request to @@ -401,7 +401,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on a floating IP, send a GET diff --git a/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py index f70f153f..58b4bdb0 100644 --- a/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py +++ b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py @@ -14,7 +14,7 @@ ActionsResourceWithStreamingResponse, AsyncActionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -67,7 +67,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: """ On creation, a floating IP must be either assigned to a Droplet or reserved to a @@ -100,13 +100,13 @@ def create( self, *, region: str, - project_id: str | NotGiven = NOT_GIVEN, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: """ On creation, a floating IP must be either assigned to a Droplet or reserved to a @@ -140,15 +140,15 @@ def create( def create( self, *, - droplet_id: int | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, + region: str | Omit = omit, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
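# A hedged sketch of the overloaded floating-IP action above: the `@required_args`
# overloads suggest `type="assign"` is paired with a `droplet_id`, while
# `type="unassign"` leaves it at its `omit` default so it is never sent.
# The `Gradient` client name is an assumption, as before.
from gradient import Gradient

client = Gradient()

# Assign the floating IP to a Droplet (IP and ID are illustrative):
client.gpu_droplets.floating_ips.actions.create(
    "192.0.2.10", type="assign", droplet_id=12345
)
# Release it again; `droplet_id` defaults to `omit` and is not serialized:
client.gpu_droplets.floating_ips.actions.create("192.0.2.10", type="unassign")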
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: return self._post( "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", @@ -175,7 +175,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPRetrieveResponse: """ To show information about a floating IP, send a GET request to @@ -205,14 +205,14 @@ def retrieve( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPListResponse: """ To list all of the floating IPs available on your account, send a GET request to @@ -258,7 +258,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a floating IP and remove it from your account, send a DELETE request @@ -324,7 +324,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: """ On creation, a floating IP must be either assigned to a Droplet or reserved to a @@ -357,13 +357,13 @@ async def create( self, *, region: str, - project_id: str | NotGiven = NOT_GIVEN, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: """ On creation, a floating IP must be either assigned to a Droplet or reserved to a @@ -397,15 +397,15 @@ async def create( async def create( self, *, - droplet_id: int | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, + region: str | Omit = omit, + project_id: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPCreateResponse: return await self._post( "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", @@ -432,7 +432,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPRetrieveResponse: """ To show information about a floating IP, send a GET request to @@ -462,14 +462,14 @@ async def retrieve( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> FloatingIPListResponse: """ To list all of the floating IPs available on your account, send a GET request to @@ -515,7 +515,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a floating IP and remove it from your account, send a DELETE request diff --git a/src/gradient/resources/gpu_droplets/gpu_droplets.py b/src/gradient/resources/gpu_droplets/gpu_droplets.py index 48a9e5fe..c9f84747 100644 --- a/src/gradient/resources/gpu_droplets/gpu_droplets.py +++ b/src/gradient/resources/gpu_droplets/gpu_droplets.py @@ -39,7 +39,7 @@ BackupsResourceWithStreamingResponse, AsyncBackupsResourceWithStreamingResponse, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ..._utils import required_args, maybe_transform, async_maybe_transform from ..._compat import cached_property from .autoscale import ( @@ -209,24 +209,24 @@ def create( image: Union[str, int], name: str, size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = 
omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: """ To create a new Droplet, send a POST request to `/v2/droplets` setting the @@ -330,24 +330,24 @@ def create( image: Union[str, int], names: SequenceNotStr[str], size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: """ To create a new Droplet, send a POST request to `/v2/droplets` setting the @@ -450,27 +450,27 @@ def create( self, *, image: Union[str, int], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, + names: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: return cast( GPUDropletCreateResponse, @@ -515,7 +515,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletRetrieveResponse: """ To show information about an individual Droplet, send a GET request to @@ -543,17 +543,17 @@ def retrieve( def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, + tag_name: str | Omit = omit, + type: Literal["droplets", "gpus"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListResponse: """ To list all Droplets in your account, send a GET request to `/v2/droplets`. 
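# A hedged async counterpart, mirroring the `Async*` resource classes in these
# hunks; the `AsyncGradient` client name is an assumption. Because the list
# filters default to `omit`, only the arguments actually passed are serialized
# into the query string.
import asyncio

from gradient import AsyncGradient  # assumed entry point


async def main() -> None:
    client = AsyncGradient()
    # Sends GET /v2/droplets?type=gpus&per_page=50; `name`, `page`, and
    # `tag_name` are omitted rather than sent as null.
    droplets = await client.gpu_droplets.list(type="gpus", per_page=50)
    print(droplets)


asyncio.run(main())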
@@ -626,7 +626,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. @@ -663,7 +663,7 @@ def delete_by_tag( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete **all** Droplets assigned to a specific tag, include the `tag_name` @@ -705,14 +705,14 @@ def list_firewalls( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListFirewallsResponse: """ To retrieve a list of all firewalls available to a Droplet, send a GET request @@ -759,14 +759,14 @@ def list_kernels( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListKernelsResponse: """ To retrieve a list of all kernels available to a Droplet, send a GET request to @@ -818,7 +818,7 @@ def list_neighbors( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListNeighborsResponse: """To retrieve a list of any "neighbors" (i.e. @@ -854,14 +854,14 @@ def list_snapshots( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListSnapshotsResponse: """ To retrieve the snapshots that have been created from a Droplet, send a GET @@ -980,24 +980,24 @@ async def create( image: Union[str, int], name: str, size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: """ To create a new Droplet, send a POST request to `/v2/droplets` setting the @@ -1101,24 +1101,24 @@ async def create( image: Union[str, int], names: SequenceNotStr[str], size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: """ To create a new Droplet, send a POST request to `/v2/droplets` setting the @@ -1221,27 +1221,27 @@ async def create( self, *, image: Union[str, int], - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: SequenceNotStr[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: SequenceNotStr[str] | NotGiven = NOT_GIVEN, + backup_policy: DropletBackupPolicyParam | Omit = omit, + backups: bool | Omit = omit, + ipv6: bool | Omit = omit, + monitoring: bool | Omit = omit, + private_networking: bool | Omit = omit, + region: str | Omit = omit, + ssh_keys: SequenceNotStr[Union[str, int]] | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + user_data: str | Omit = omit, + volumes: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, + with_droplet_agent: bool | Omit = omit, + names: SequenceNotStr[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletCreateResponse: return cast( GPUDropletCreateResponse, @@ -1286,7 +1286,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletRetrieveResponse: """ To show information about an individual Droplet, send a GET request to @@ -1314,17 +1314,17 @@ async def retrieve( async def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, + tag_name: str | Omit = omit, + type: Literal["droplets", "gpus"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListResponse: """ To list all Droplets in your account, send a GET request to `/v2/droplets`. @@ -1397,7 +1397,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. @@ -1434,7 +1434,7 @@ async def delete_by_tag( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete **all** Droplets assigned to a specific tag, include the `tag_name` @@ -1476,14 +1476,14 @@ async def list_firewalls( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListFirewallsResponse: """ To retrieve a list of all firewalls available to a Droplet, send a GET request @@ -1530,14 +1530,14 @@ async def list_kernels( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListKernelsResponse: """ To retrieve a list of all kernels available to a Droplet, send a GET request to @@ -1589,7 +1589,7 @@ async def list_neighbors( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListNeighborsResponse: """To retrieve a list of any "neighbors" (i.e. @@ -1625,14 +1625,14 @@ async def list_snapshots( self, droplet_id: int, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> GPUDropletListSnapshotsResponse: """ To retrieve the snapshots that have been created from a Droplet, send a GET diff --git a/src/gradient/resources/gpu_droplets/images/actions.py b/src/gradient/resources/gpu_droplets/images/actions.py index 287558ca..d2d33f11 100644 --- a/src/gradient/resources/gpu_droplets/images/actions.py +++ b/src/gradient/resources/gpu_droplets/images/actions.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -55,7 +55,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ The following actions are available on an Image. @@ -112,7 +112,7 @@ def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ The following actions are available on an Image. @@ -167,13 +167,13 @@ def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: return self._post( f"/v2/images/{image_id}/actions" @@ -202,7 +202,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ To retrieve the status of an image action, send a GET request to @@ -236,7 +236,7 @@ def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on an image, send a GET request @@ -293,7 +293,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ The following actions are available on an Image. 
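Note that `timeout` moves from `NOT_GIVEN` to `not_given` rather than to `None`, because the two mean different things downstream: in httpx, `timeout=None` disables the timeout entirely, while an unset value should fall back to the client-level default. A minimal sketch of that three-state resolution, with the `DEFAULT_TIMEOUT` constant and `resolve_timeout` helper invented for illustration:

    from typing import Optional, Union

    class NotGiven:
        def __bool__(self) -> bool:
            return False

    not_given = NotGiven()

    DEFAULT_TIMEOUT = 60.0  # stand-in for the client-level default

    def resolve_timeout(
        timeout: Union[float, None, NotGiven] = not_given,
    ) -> Optional[float]:
        if isinstance(timeout, NotGiven):
            return DEFAULT_TIMEOUT  # caller said nothing: use the default
        return timeout  # explicit value, including None (disable timeout)

    assert resolve_timeout() == 60.0
    assert resolve_timeout(None) is None
    assert resolve_timeout(5.0) == 5.0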
@@ -350,7 +350,7 @@ async def create( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ The following actions are available on an Image. @@ -405,13 +405,13 @@ async def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: return await self._post( f"/v2/images/{image_id}/actions" @@ -440,7 +440,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Action: """ To retrieve the status of an image action, send a GET request to @@ -474,7 +474,7 @@ async def list( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on an image, send a GET request diff --git a/src/gradient/resources/gpu_droplets/images/images.py b/src/gradient/resources/gpu_droplets/images/images.py index 1b00c024..83e04d13 100644 --- a/src/gradient/resources/gpu_droplets/images/images.py +++ b/src/gradient/resources/gpu_droplets/images/images.py @@ -15,7 +15,7 @@ ActionsResourceWithStreamingResponse, AsyncActionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -62,7 +62,7 @@ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: def create( self, *, - description: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, distribution: Literal[ "Arch Linux", "CentOS", @@ -78,8 +78,8 @@ def create( "Ubuntu", "Unknown", ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + | Omit = omit, + name: str | Omit = omit, region: Literal[ "ams1", "ams2", @@ -97,15 +97,15 @@ def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - url: str | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + url: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageCreateResponse: """To create a new custom image, send a POST request to /v2/images. @@ -176,7 +176,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageRetrieveResponse: """ To retrieve information about an image, send a `GET` request to @@ -205,7 +205,7 @@ def update( self, image_id: int, *, - description: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, distribution: Literal[ "Arch Linux", "CentOS", @@ -221,14 +221,14 @@ def update( "Ubuntu", "Unknown", ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageUpdateResponse: """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. @@ -277,17 +277,17 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - private: bool | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + private: bool | Omit = omit, + tag_name: str | Omit = omit, + type: Literal["application", "distribution"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageListResponse: """ To list all of the images available on your account, send a GET request to @@ -374,7 +374,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a snapshot or custom image, send a `DELETE` request to @@ -428,7 +428,7 @@ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: async def create( self, *, - description: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, distribution: Literal[ "Arch Linux", "CentOS", @@ -444,8 +444,8 @@ async def create( "Ubuntu", "Unknown", ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + | Omit = omit, + name: str | Omit = omit, region: Literal[ "ams1", "ams2", @@ -463,15 +463,15 @@ async def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - url: str | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + url: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageCreateResponse: """To create a new custom image, send a POST request to /v2/images. @@ -542,7 +542,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageRetrieveResponse: """ To retrieve information about an image, send a `GET` request to @@ -571,7 +571,7 @@ async def update( self, image_id: int, *, - description: str | NotGiven = NOT_GIVEN, + description: str | Omit = omit, distribution: Literal[ "Arch Linux", "CentOS", @@ -587,14 +587,14 @@ async def update( "Ubuntu", "Unknown", ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageUpdateResponse: """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. 
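Taken together with the `Omit` defaults, partial updates stay partial: any field the caller leaves out never reaches the wire. A hypothetical call against the resource tree implied by this file's path (`gradient/resources/gpu_droplets/images`); the `Gradient` client name and the image ID are assumptions, not confirmed by this patch:

    from gradient import Gradient  # assumed package/client name

    client = Gradient()
    image = client.gpu_droplets.images.update(
        image_id=7938391,         # illustrative ID
        name="renamed-snapshot",  # only this field is sent; description,
    )                             # distribution, etc. default to `omit`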
@@ -643,17 +643,17 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - private: bool | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + private: bool | Omit = omit, + tag_name: str | Omit = omit, + type: Literal["application", "distribution"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ImageListResponse: """ To list all of the images available on your account, send a GET request to @@ -740,7 +740,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a snapshot or custom image, send a `DELETE` request to diff --git a/src/gradient/resources/gpu_droplets/load_balancers/droplets.py b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py index 4d8eb4c5..ddcdc63a 100644 --- a/src/gradient/resources/gpu_droplets/load_balancers/droplets.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/droplets.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a Droplet to a load balancer instance, send a POST request to @@ -101,7 +101,7 @@ def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a Droplet from a load balancer instance, send a DELETE request to @@ -168,7 +168,7 @@ async def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To assign a Droplet to a load balancer instance, send a POST request to @@ -217,7 +217,7 @@ async def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove a Droplet from a load balancer instance, send a DELETE request to diff --git a/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py 
b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py index c4be65e7..8f9092e0 100644 --- a/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/forwarding_rules.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -53,7 +53,7 @@ def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To add an additional forwarding rule to a load balancer instance, send a POST @@ -100,7 +100,7 @@ def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove forwarding rules from a load balancer instance, send a DELETE request @@ -168,7 +168,7 @@ async def add( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To add an additional forwarding rule to a load balancer instance, send a POST @@ -215,7 +215,7 @@ async def remove( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To remove forwarding rules from a load balancer instance, send a DELETE request diff --git a/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py index 8f11a5da..2a1e52d9 100644 --- a/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py +++ b/src/gradient/resources/gpu_droplets/load_balancers/load_balancers.py @@ -15,7 +15,7 @@ DropletsResourceWithStreamingResponse, AsyncDropletsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -86,21 +86,21 @@ def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = 
NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -118,20 +118,20 @@ def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: """ To create a new load balancer instance, send a POST request to @@ -245,20 +245,20 @@ def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -276,21 +276,21 @@ def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + tag: str | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: """ To create a new load balancer instance, send a POST request to @@ -405,21 +405,21 @@ def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -437,21 +437,21 @@ def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, + tag: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: return self._post( "/v2/load_balancers" @@ -502,7 +502,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerRetrieveResponse: """ To show information about a load balancer instance, send a GET request to @@ -535,21 +535,21 @@ def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -567,20 +567,20 @@ def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: """ To update a load balancer's settings, send a PUT request to @@ -689,20 +689,20 @@ def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -720,21 +720,21 @@ def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + tag: str | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: """ To update a load balancer's settings, send a PUT request to @@ -844,21 +844,21 @@ def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -876,21 +876,21 @@ def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, + tag: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: if not lb_id: raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") @@ -937,14 +937,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerListResponse: """ To list all of the load balancer instances on your account, send a GET request @@ -992,7 +992,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a load balancer instance, disassociating any Droplets assigned to it @@ -1033,7 +1033,7 @@ def delete_cache( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a Global load balancer CDN cache, send a DELETE request to @@ -1098,21 +1098,21 @@ async def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ 
-1130,20 +1130,20 @@ async def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: """ To create a new load balancer instance, send a POST request to @@ -1257,20 +1257,20 @@ async def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1288,21 +1288,21 @@ async def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = 
NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + tag: str | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: """ To create a new load balancer instance, send a POST request to @@ -1417,21 +1417,21 @@ async def create( self, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1449,21 +1449,21 @@ async def create( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: 
StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, + tag: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerCreateResponse: return await self._post( "/v2/load_balancers" @@ -1514,7 +1514,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerRetrieveResponse: """ To show information about a load balancer instance, send a GET request to @@ -1547,21 +1547,21 @@ async def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1579,20 +1579,20 @@ async def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = 
omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: """ To update a load balancer's settings, send a PUT request to @@ -1701,20 +1701,20 @@ async def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1732,21 +1732,21 @@ async def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + tag: str | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass 
additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: """ To update a load balancer's settings, send a PUT request to @@ -1856,21 +1856,21 @@ async def update( lb_id: str, *, forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + algorithm: Literal["round_robin", "least_connections"] | Omit = omit, + disable_lets_encrypt_dns_records: bool | Omit = omit, + domains: Iterable[DomainsParam] | Omit = omit, + droplet_ids: Iterable[int] | Omit = omit, + enable_backend_keepalive: bool | Omit = omit, + enable_proxy_protocol: bool | Omit = omit, + firewall: LbFirewallParam | Omit = omit, + glb_settings: GlbSettingsParam | Omit = omit, + health_check: HealthCheckParam | Omit = omit, + http_idle_timeout_seconds: int | Omit = omit, + name: str | Omit = omit, + network: Literal["EXTERNAL", "INTERNAL"] | Omit = omit, + network_stack: Literal["IPV4", "DUALSTACK"] | Omit = omit, + project_id: str | Omit = omit, + redirect_http_to_https: bool | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1888,21 +1888,21 @@ async def update( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, + | Omit = omit, + size: Literal["lb-small", "lb-medium", "lb-large"] | Omit = omit, + size_unit: int | Omit = omit, + sticky_sessions: StickySessionsParam | Omit = omit, + target_load_balancer_ids: SequenceNotStr[str] | Omit = omit, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | Omit = omit, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | Omit = omit, + vpc_uuid: str | Omit = omit, + tag: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerUpdateResponse: if not lb_id: raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") @@ -1949,14 +1949,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> LoadBalancerListResponse: """ To list all of the load balancer instances on your account, send a GET request @@ -2004,7 +2004,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a load balancer instance, disassociating any Droplets assigned to it @@ -2045,7 +2045,7 @@ async def delete_cache( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a Global load balancer CDN cache, send a DELETE request to diff --git a/src/gradient/resources/gpu_droplets/sizes.py b/src/gradient/resources/gpu_droplets/sizes.py index 7cfc5629..9893903f 100644 --- a/src/gradient/resources/gpu_droplets/sizes.py +++ b/src/gradient/resources/gpu_droplets/sizes.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -44,14 +44,14 @@ def with_streaming_response(self) -> SizesResourceWithStreamingResponse: def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SizeListResponse: """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. 
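Every hunk in this patch makes the same mechanical substitution, so it is worth spelling out once: optional request parameters move from `NotGiven = NOT_GIVEN` to `Omit = omit`, while `timeout` keeps its `NotGiven` annotation and only swaps the default for the lowercase `not_given` alias. The sketch below shows how a sentinel pair like this typically behaves; the real definitions live in `gradient/_types.py`, which these hunks only import from, so the class bodies here are assumptions rather than the SDK's actual code.

    # Illustrative sketch only: `Omit`/`omit` and `NotGiven`/`not_given` are the
    # names imported in this patch, but their definitions are assumed here.
    from typing import Literal

    class NotGiven:
        """Sentinel: the caller passed nothing, so client-level defaults apply."""
        def __bool__(self) -> Literal[False]:
            return False

    class Omit:
        """Sentinel: leave this field out of the serialized request entirely."""
        def __bool__(self) -> Literal[False]:
            return False

    not_given = NotGiven()
    omit = Omit()

    def strip_sentinels(params: dict[str, object]) -> dict[str, object]:
        # A parameter still bound to a sentinel never reaches the wire, which is
        # how `per_page: int | Omit = omit` differs from passing `per_page=None`.
        return {k: v for k, v in params.items() if not isinstance(v, (Omit, NotGiven))}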
@@ -115,14 +115,14 @@ def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse: async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SizeListResponse: """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. diff --git a/src/gradient/resources/gpu_droplets/snapshots.py b/src/gradient/resources/gpu_droplets/snapshots.py index eed93cfd..78bd01ac 100644 --- a/src/gradient/resources/gpu_droplets/snapshots.py +++ b/src/gradient/resources/gpu_droplets/snapshots.py @@ -7,7 +7,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -54,7 +54,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotRetrieveResponse: """ To retrieve information about a snapshot, send a GET request to @@ -87,15 +87,15 @@ def retrieve( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + resource_type: Literal["droplet", "volume"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotListResponse: """ To list all of the snapshots available on your account, send a GET request to @@ -162,7 +162,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Both Droplet and volume snapshots are managed through the `/v2/snapshots/` @@ -224,7 +224,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotRetrieveResponse: """ To retrieve information about a snapshot, send a GET request to @@ -257,15 +257,15 @@ async def retrieve( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + resource_type: Literal["droplet", "volume"] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotListResponse: """ To list all of the snapshots available on your account, send a GET request to @@ -332,7 +332,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Both Droplet and volume snapshots are managed through the `/v2/snapshots/` diff --git a/src/gradient/resources/gpu_droplets/volumes/actions.py b/src/gradient/resources/gpu_droplets/volumes/actions.py index c648beaa..1c0c66a0 100644 --- a/src/gradient/resources/gpu_droplets/volumes/actions.py +++ b/src/gradient/resources/gpu_droplets/volumes/actions.py @@ -7,7 +7,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -57,14 +57,14 @@ def retrieve( action_id: int, *, volume_id: str, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve the status of a volume action, send a GET request to @@ -109,14 +109,14 @@ def list( self, volume_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on a volume, send a GET request @@ -164,8 +164,8 @@ def initiate_by_id( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -183,14 +183,14 @@ def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -265,8 +265,8 @@ def initiate_by_id( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -284,13 +284,13 @@ def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -360,8 +360,8 @@ def initiate_by_id( *, size_gigabytes: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -379,13 +379,13 @@ def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -452,10 +452,10 @@ def initiate_by_id( self, volume_id: str, *, - droplet_id: int | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -473,15 +473,15 @@ def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - size_gigabytes: int | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + size_gigabytes: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: if not volume_id: raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") @@ -521,8 +521,8 @@ def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -540,14 +540,14 @@ def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: """ To initiate an action on a block storage volume by Name, send a POST request to @@ -613,8 +613,8 @@ def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -632,13 +632,13 @@ def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: """ To initiate an action on a block storage volume by Name, send a POST request to @@ -699,8 +699,8 @@ def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -718,14 +718,14 @@ def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: return self._post( "/v2/volumes/actions" @@ -782,14 +782,14 @@ async def retrieve( action_id: int, *, volume_id: str, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionRetrieveResponse: """ To retrieve the status of a volume action, send a GET request to @@ -834,14 +834,14 @@ async def list( self, volume_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionListResponse: """ To retrieve all actions that have been executed on a volume, send a GET request @@ -889,8 +889,8 @@ async def initiate_by_id( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -908,14 +908,14 @@ async def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -990,8 +990,8 @@ async def initiate_by_id( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1009,13 +1009,13 @@ async def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -1085,8 +1085,8 @@ async def initiate_by_id( *, size_gigabytes: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1104,13 +1104,13 @@ async def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: """ To initiate an action on a block storage volume by Id, send a POST request to @@ -1177,10 +1177,10 @@ async def initiate_by_id( self, volume_id: str, *, - droplet_id: int | NotGiven = NOT_GIVEN, + droplet_id: int | Omit = omit, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1198,15 +1198,15 @@ async def initiate_by_id( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, - size_gigabytes: int | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, + size_gigabytes: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByIDResponse: if not volume_id: raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") @@ -1246,8 +1246,8 @@ async def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1265,14 +1265,14 @@ async def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: """ To initiate an action on a block storage volume by Name, send a POST request to @@ -1338,8 +1338,8 @@ async def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1357,13 +1357,13 @@ async def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: """ To initiate an action on a block storage volume by Name, send a POST request to @@ -1424,8 +1424,8 @@ async def initiate_by_name( *, droplet_id: int, type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -1443,14 +1443,14 @@ async def initiate_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ActionInitiateByNameResponse: return await self._post( "/v2/volumes/actions" diff --git a/src/gradient/resources/gpu_droplets/volumes/snapshots.py b/src/gradient/resources/gpu_droplets/volumes/snapshots.py index 7e925264..694de074 100644 --- a/src/gradient/resources/gpu_droplets/volumes/snapshots.py +++ b/src/gradient/resources/gpu_droplets/volumes/snapshots.py @@ -6,7 +6,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -50,13 +50,13 @@ def create( volume_id: str, *, name: str, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotCreateResponse: """ To create a snapshot from a volume, sent a POST request to @@ -106,7 +106,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotRetrieveResponse: """ To retrieve the details of a snapshot that has been created from a volume, send @@ -137,14 +137,14 @@ def list( self, volume_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotListResponse: """ To retrieve the snapshots that have been created from a volume, send a GET @@ -194,7 +194,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a volume snapshot, send a DELETE request to @@ -251,13 +251,13 @@ async def create( volume_id: str, *, name: str, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotCreateResponse: """ To create a snapshot from a volume, sent a POST request to @@ -307,7 +307,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotRetrieveResponse: """ To retrieve the details of a snapshot that has been created from a volume, send @@ -338,14 +338,14 @@ async def list( self, volume_id: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> SnapshotListResponse: """ To retrieve the snapshots that have been created from a volume, send a GET @@ -395,7 +395,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a volume snapshot, send a DELETE request to diff --git a/src/gradient/resources/gpu_droplets/volumes/volumes.py b/src/gradient/resources/gpu_droplets/volumes/volumes.py index ee980a10..fb86c288 100644 --- a/src/gradient/resources/gpu_droplets/volumes/volumes.py +++ b/src/gradient/resources/gpu_droplets/volumes/volumes.py @@ -15,7 +15,7 @@ ActionsResourceWithStreamingResponse, AsyncActionsResourceWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr +from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given from ...._utils import required_args, maybe_transform, async_maybe_transform from .snapshots import ( SnapshotsResource, @@ -93,17 +93,17 @@ def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: """To create a new volume, send a POST request to `/v2/volumes`. @@ -178,17 +178,17 @@ def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: """To create a new volume, send a POST request to `/v2/volumes`. 
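With the new defaults, a caller supplies only the fields it cares about, and everything left at `omit` stays out of the POST body. A minimal usage sketch, assuming a configured client that exposes this resource as `client.gpu_droplets.volumes` (the accessor path is inferred from the file layout, not shown in this hunk):

    from gradient import Gradient

    client = Gradient()

    volume = client.gpu_droplets.volumes.create(
        name="example-volume",  # assumed to be required by the API; the name
                                # parameter sits outside the lines this hunk shows
        region="syd1",          # one of the Literal region slugs in the signature
        size_gigabytes=10,
        # description, filesystem_label, filesystem_type, snapshot_id and tags
        # all stay at `omit`, so none of them appear in the request body.
    )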
@@ -263,17 +263,17 @@ def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: return self._post( "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", @@ -305,7 +305,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeRetrieveResponse: """ To show information about a block storage volume, send a GET request to @@ -335,9 +335,9 @@ def retrieve( def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -355,13 +355,13 @@ def list( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeListResponse: """ To list all of the block storage volumes available on your account, send a GET @@ -434,7 +434,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a block storage volume, destroying all data and removing it from your @@ -468,7 +468,7 @@ def delete( def delete_by_name( self, *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, region: Literal[ "ams1", "ams2", @@ -486,13 +486,13 @@ def delete_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Block storage volumes may also be deleted by name by sending a DELETE request @@ -586,17 +586,17 @@ async def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: """To create a new volume, send a POST request to `/v2/volumes`. @@ -671,17 +671,17 @@ async def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: """To create a new volume, send a POST request to `/v2/volumes`. @@ -756,17 +756,17 @@ async def create( "syd1", ], size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN, + description: str | Omit = omit, + filesystem_label: str | Omit = omit, + filesystem_type: str | Omit = omit, + snapshot_id: str | Omit = omit, + tags: Optional[SequenceNotStr[str]] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeCreateResponse: return await self._post( "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", @@ -798,7 +798,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeRetrieveResponse: """ To show information about a block storage volume, send a GET request to @@ -828,9 +828,9 @@ async def retrieve( async def list( self, *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + name: str | Omit = omit, + page: int | Omit = omit, + per_page: int | Omit = omit, region: Literal[ "ams1", "ams2", @@ -848,13 +848,13 @@ async def list( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> VolumeListResponse: """ To list all of the block storage volumes available on your account, send a GET @@ -927,7 +927,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ To delete a block storage volume, destroying all data and removing it from your @@ -961,7 +961,7 @@ async def delete( async def delete_by_name( self, *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, region: Literal[ "ams1", "ams2", @@ -979,13 +979,13 @@ async def delete_by_name( "tor1", "syd1", ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> None: """ Block storage volumes may also be deleted by name by sending a DELETE request diff --git a/src/gradient/resources/inference/api_keys.py b/src/gradient/resources/inference/api_keys.py index fa7f86dc..8dfa54e1 100644 --- a/src/gradient/resources/inference/api_keys.py +++ b/src/gradient/resources/inference/api_keys.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -48,13 +48,13 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: def create( self, *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyCreateResponse: """ To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. @@ -85,14 +85,14 @@ def update( self, path_api_key_uuid: str, *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateResponse: """ To update a model API key, send a PUT request to @@ -133,14 +133,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyListResponse: """ To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. 
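The `omit` default matters most for partial updates: a field you do not pass is absent from the request, which is not the same as sending an explicit value. A hedged sketch against the `update` method above, with a placeholder key UUID:

    updated = client.inference.api_keys.update(
        "00000000-0000-0000-0000-000000000000",  # path_api_key_uuid (placeholder)
        name="renamed-key",
        # body_api_key_uuid stays at `omit`, so the PUT body carries only `name`.
    )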
@@ -187,7 +187,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyDeleteResponse: """ To delete an API key for a model, send a DELETE request to @@ -223,7 +223,7 @@ def update_regenerate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateRegenerateResponse: """ To regenerate a model API key, send a PUT request to @@ -274,13 +274,13 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: async def create( self, *, - name: str | NotGiven = NOT_GIVEN, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyCreateResponse: """ To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. @@ -311,14 +311,14 @@ async def update( self, path_api_key_uuid: str, *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateResponse: """ To update a model API key, send a PUT request to @@ -359,14 +359,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyListResponse: """ To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. 
@@ -413,7 +413,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyDeleteResponse: """ To delete an API key for a model, send a DELETE request to @@ -449,7 +449,7 @@ async def update_regenerate( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> APIKeyUpdateRegenerateResponse: """ To regenerate a model API key, send a PUT request to diff --git a/src/gradient/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py index 16252324..083ea45f 100644 --- a/src/gradient/resources/knowledge_bases/data_sources.py +++ b/src/gradient/resources/knowledge_bases/data_sources.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -53,16 +53,16 @@ def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | Omit = omit, + body_knowledge_base_uuid: str | Omit = omit, + spaces_data_source: APISpacesDataSourceParam | Omit = omit, + web_crawler_data_source: APIWebCrawlerDataSourceParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceCreateResponse: """ To add a data source to a knowledge base, send a POST request to @@ -112,14 +112,14 @@ def list( self, knowledge_base_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceListResponse: """ To list all data sources for a knowledge base, send a GET request to @@ -172,7 +172,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceDeleteResponse: """ To delete a data source from a knowledge base, send a DELETE request to @@ -228,16 +228,16 @@ async def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | Omit = omit, + body_knowledge_base_uuid: str | Omit = omit, + spaces_data_source: APISpacesDataSourceParam | Omit = omit, + web_crawler_data_source: APIWebCrawlerDataSourceParam | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceCreateResponse: """ To add a data source to a knowledge base, send a POST request to @@ -287,14 +287,14 @@ async def list( self, knowledge_base_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceListResponse: """ To list all data sources for a knowledge base, send a GET request to @@ -347,7 +347,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> DataSourceDeleteResponse: """ To delete a data source from a knowledge base, send a DELETE request to diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py index 41e7da76..95898c2a 100644 --- a/src/gradient/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py @@ -4,7 +4,7 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -52,14 +52,14 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: def create( self, *, - data_source_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + data_source_uuids: SequenceNotStr[str] | Omit = omit, + knowledge_base_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobCreateResponse: """ To start an indexing job for a knowledge base, send a POST request to @@ -105,7 +105,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobRetrieveResponse: """ To get status of an indexing Job for a knowledge base, send a GET request to @@ -135,14 +135,14 @@ def retrieve( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobListResponse: """ To list all indexing jobs for a knowledge base, send a GET request to @@ -190,7 +190,7 @@ def retrieve_data_sources( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobRetrieveDataSourcesResponse: """ To list all datasources for an indexing job, send a GET request to @@ -221,13 +221,13 @@ def update_cancel( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobUpdateCancelResponse: """ To cancel an indexing job for a knowledge base, send a PUT request to @@ -283,14 +283,14 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo async def create( self, *, - data_source_uuids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + data_source_uuids: SequenceNotStr[str] | Omit = omit, + knowledge_base_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobCreateResponse: """ To start an indexing job for a knowledge base, send a POST request to @@ -336,7 +336,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobRetrieveResponse: """ To get status of an indexing Job for a knowledge base, send a GET request to @@ -366,14 +366,14 @@ async def retrieve( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobListResponse: """ To list all indexing jobs for a knowledge base, send a GET request to @@ -421,7 +421,7 @@ async def retrieve_data_sources( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobRetrieveDataSourcesResponse: """ To list all datasources for an indexing job, send a GET request to @@ -452,13 +452,13 @@ async def update_cancel( self, path_uuid: str, *, - body_uuid: str | NotGiven = NOT_GIVEN, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> IndexingJobUpdateCancelResponse: """ To cancel an indexing job for a knowledge base, send a PUT request to diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py index 61fc85a8..00fa0659 100644 --- a/src/gradient/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py @@ -7,7 +7,7 @@ import httpx from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr +from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -74,20 +74,20 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse def create( self, *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + database_id: str | Omit = omit, + datasources: Iterable[knowledge_base_create_params.Datasource] | Omit = omit, + embedding_model_uuid: str | Omit = omit, + name: str | Omit = omit, + project_id: str | Omit = omit, + region: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseCreateResponse: """ To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. @@ -154,7 +154,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseRetrieveResponse: """ To retrieve information about an existing knowledge base, send a GET request to @@ -185,18 +185,18 @@ def update( self, path_uuid: str, *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, + database_id: str | Omit = omit, + embedding_model_uuid: str | Omit = omit, + name: str | Omit = omit, + project_id: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseUpdateResponse: """ To update a knowledge base, send a PUT request to @@ -249,14 +249,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseListResponse: """ To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. 
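
Editor's note: because `page` and `per_page` default to `omit`, a bare `list()` call sends no pagination query string at all and the server applies its own defaults. A hedged usage sketch; it assumes the resource is exposed as `client.knowledge_bases` and that credentials are configured in the environment:

    from gradient import Gradient

    client = Gradient()

    # No query parameters are sent; the API chooses page and page size.
    kbs = client.knowledge_bases.list()

    # Explicit values are serialized as ?page=2&per_page=50.
    page2 = client.knowledge_bases.list(page=2, per_page=50)
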
@@ -303,7 +303,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseDeleteResponse: """ To delete a knowledge base, send a DELETE request to @@ -362,20 +362,20 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes async def create( self, *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, + database_id: str | Omit = omit, + datasources: Iterable[knowledge_base_create_params.Datasource] | Omit = omit, + embedding_model_uuid: str | Omit = omit, + name: str | Omit = omit, + project_id: str | Omit = omit, + region: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + vpc_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseCreateResponse: """ To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. @@ -442,7 +442,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseRetrieveResponse: """ To retrieve information about an existing knowledge base, send a GET request to @@ -473,18 +473,18 @@ async def update( self, path_uuid: str, *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: SequenceNotStr[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, + database_id: str | Omit = omit, + embedding_model_uuid: str | Omit = omit, + name: str | Omit = omit, + project_id: str | Omit = omit, + tags: SequenceNotStr[str] | Omit = omit, + body_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseUpdateResponse: """ To update a knowledge base, send a PUT request to @@ -537,14 +537,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseListResponse: """ To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. @@ -591,7 +591,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> KnowledgeBaseDeleteResponse: """ To delete a knowledge base, send a DELETE request to diff --git a/src/gradient/resources/models/models.py b/src/gradient/resources/models/models.py index ffb5fea9..650c49c9 100644 --- a/src/gradient/resources/models/models.py +++ b/src/gradient/resources/models/models.py @@ -8,7 +8,7 @@ import httpx from ...types import model_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -59,9 +59,9 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + public_only: bool | Omit = omit, usecases: List[ Literal[ "MODEL_USECASE_UNKNOWN", @@ -73,13 +73,13 @@ def list( "MODEL_USECASE_SERVERLESS", ] ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. 
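
Editor's note: the `usecases` parameter above is a list of `Literal` enum strings rather than free text, so an invalid filter fails type-checking before any request is made. A hedged sketch of filtering the model list; the `client.models` attribute path is inferred from the file location:

    from gradient import Gradient

    client = Gradient()

    # Unset filters remain `omit` and are never serialized; only the two
    # parameters given here end up in the query string.
    serverless = client.models.list(
        public_only=True,
        usecases=["MODEL_USECASE_SERVERLESS"],
    )
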
@@ -160,9 +160,9 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, + public_only: bool | Omit = omit, usecases: List[ Literal[ "MODEL_USECASE_UNKNOWN", @@ -174,13 +174,13 @@ async def list( "MODEL_USECASE_SERVERLESS", ] ] - | NotGiven = NOT_GIVEN, + | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ModelListResponse: """ To list all models, send a GET request to `/v2/gen-ai/models`. diff --git a/src/gradient/resources/models/providers/anthropic.py b/src/gradient/resources/models/providers/anthropic.py index ddb0eef8..33b2ec80 100644 --- a/src/gradient/resources/models/providers/anthropic.py +++ b/src/gradient/resources/models/providers/anthropic.py @@ -4,7 +4,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -54,14 +54,14 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicCreateResponse: """ To create an Anthropic API key, send a POST request to @@ -106,7 +106,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to @@ -137,15 +137,15 @@ def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicUpdateResponse: """ To update an Anthropic API key, send a PUT request to @@ -189,14 +189,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicListResponse: """ To list all Anthropic API keys, send a GET request to @@ -244,7 +244,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to @@ -275,14 +275,14 @@ def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicListAgentsResponse: """ List Agents by Anthropic Key. @@ -346,14 +346,14 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse async def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicCreateResponse: """ To create an Anthropic API key, send a POST request to @@ -398,7 +398,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicRetrieveResponse: """ To retrieve details of an Anthropic API key, send a GET request to @@ -429,15 +429,15 @@ async def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicUpdateResponse: """ To update an Anthropic API key, send a PUT request to @@ -481,14 +481,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicListResponse: """ To list all Anthropic API keys, send a GET request to @@ -536,7 +536,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicDeleteResponse: """ To delete an Anthropic API key, send a DELETE request to @@ -567,14 +567,14 @@ async def list_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AnthropicListAgentsResponse: """ List Agents by Anthropic Key. 
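
Editor's note on the naming split in `update` above: `path_api_key_uuid` is the positional argument interpolated into the URL, while the optional `body_api_key_uuid` travels in the JSON body and defaults to `omit`. A hedged sketch; the `client.models.providers.anthropic` path is inferred from the file layout:

    from gradient import Gradient

    client = Gradient()

    key = client.models.providers.anthropic.update(
        "123e4567-e89b-12d3-a456-426614174000",  # path_api_key_uuid -> URL
        name="renamed-key",
        # body_api_key_uuid stays `omit`, so the UUID is not duplicated
        # into the request body unless explicitly passed.
    )
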
diff --git a/src/gradient/resources/models/providers/openai.py b/src/gradient/resources/models/providers/openai.py index 166e284d..5bdc3f20 100644 --- a/src/gradient/resources/models/providers/openai.py +++ b/src/gradient/resources/models/providers/openai.py @@ -4,7 +4,7 @@ import httpx -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -54,14 +54,14 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAICreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. @@ -105,7 +105,7 @@ def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to @@ -136,15 +136,15 @@ def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIUpdateResponse: """ To update an OpenAI API key, send a PUT request to @@ -188,14 +188,14 @@ def update( def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. 
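
Editor's note: each sync resource in these patches has an async twin with an identical surface, which is why every hunk appears twice per file. A hedged sketch of the async variant; `AsyncGradient` matches the test fixtures, and the attribute path is again inferred from the file layout:

    import asyncio

    from gradient import AsyncGradient

    async def main() -> None:
        client = AsyncGradient()
        # Same signature as the sync list(), just awaited.
        keys = await client.models.providers.openai.list(per_page=10)
        print(keys)

    asyncio.run(main())
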
@@ -242,7 +242,7 @@ def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to @@ -273,14 +273,14 @@ def retrieve_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIRetrieveAgentsResponse: """ List Agents by OpenAI Key. @@ -344,14 +344,14 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: async def create( self, *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAICreateResponse: """ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. @@ -395,7 +395,7 @@ async def retrieve( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIRetrieveResponse: """ To retrieve details of an OpenAI API key, send a GET request to @@ -426,15 +426,15 @@ async def update( self, path_api_key_uuid: str, *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, + api_key: str | Omit = omit, + body_api_key_uuid: str | Omit = omit, + name: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIUpdateResponse: """ To update an OpenAI API key, send a PUT request to @@ -478,14 +478,14 @@ async def update( async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIListResponse: """ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. @@ -532,7 +532,7 @@ async def delete( extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIDeleteResponse: """ To delete an OpenAI API key, send a DELETE request to @@ -563,14 +563,14 @@ async def retrieve_agents( self, uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> OpenAIRetrieveAgentsResponse: """ List Agents by OpenAI Key. diff --git a/src/gradient/resources/regions.py b/src/gradient/resources/regions.py index 779bd4dd..3b0f22fa 100644 --- a/src/gradient/resources/regions.py +++ b/src/gradient/resources/regions.py @@ -5,7 +5,7 @@ import httpx from ..types import region_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -44,14 +44,14 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RegionListResponse: """ To list all of the regions that are available, send a GET request to @@ -114,14 +114,14 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + page: int | Omit = omit, + per_page: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> RegionListResponse: """ To list all of the regions that are available, send a GET request to diff --git a/tests/test_transform.py b/tests/test_transform.py index db909f25..098015a9 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,7 +8,7 @@ import pytest -from gradient._types import NOT_GIVEN, Base64FileInput +from gradient._types import Base64FileInput, omit, not_given from gradient._utils import ( PropertyInfo, transform as _transform, @@ -450,4 +450,11 @@ async def test_transform_skipping(use_async: bool) -> None: @pytest.mark.asyncio async def test_strips_notgiven(use_async: bool) -> None: assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"} - assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {} + assert await transform({"foo_bar": not_given}, Foo1, use_async) == {} + + +@parametrize +@pytest.mark.asyncio +async def test_strips_omit(use_async: bool) -> None: + assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"} + assert await transform({"foo_bar": omit}, Foo1, use_async) == {} From cfe7331eeeea883e78add1aae719ce8e3c8b9bad Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 20 Sep 2025 03:46:01 +0000 Subject: [PATCH 173/200] chore: do not install brew dependencies in ./scripts/bootstrap by default --- scripts/bootstrap | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/bootstrap b/scripts/bootstrap index e84fe62c..b430fee3 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,10 +4,18 @@ set -e cd "$(dirname "$0")/.." -if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then brew bundle check >/dev/null 2>&1 || { - echo "==> Installing Homebrew dependencies…" - brew bundle + echo -n "==> Install Homebrew dependencies? 
(y/N): " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + brew bundle + ;; + *) + ;; + esac + echo } fi From bd5a5780d2aa56beeb6ea117589b26f222c73cd6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 23 Sep 2025 03:07:55 +0000 Subject: [PATCH 174/200] chore: improve example values --- .../evaluation_metrics/anthropic/test_keys.py | 24 ++++++------- .../evaluation_metrics/openai/test_keys.py | 24 ++++++------- .../evaluation_metrics/test_workspaces.py | 36 +++++++++---------- .../agents/test_evaluation_runs.py | 12 +++---- .../agents/test_knowledge_bases.py | 12 +++---- tests/api_resources/agents/test_routes.py | 12 +++---- .../databases/schema_registry/test_config.py | 12 +++---- .../gpu_droplets/floating_ips/test_actions.py | 12 +++---- .../gpu_droplets/images/test_actions.py | 12 +++---- .../gpu_droplets/test_autoscale.py | 24 ++++++------- .../gpu_droplets/test_backups.py | 12 +++---- .../test_destroy_with_associated_resources.py | 36 +++++++++---------- .../gpu_droplets/test_firewalls.py | 24 ++++++------- .../gpu_droplets/test_floating_ips.py | 24 ++++++------- .../api_resources/gpu_droplets/test_images.py | 12 +++---- .../gpu_droplets/test_load_balancers.py | 36 +++++++++---------- .../gpu_droplets/test_volumes.py | 24 ++++++------- .../gpu_droplets/volumes/test_snapshots.py | 24 ++++++------- .../api_resources/inference/test_api_keys.py | 24 ++++++------- .../knowledge_bases/test_indexing_jobs.py | 24 ++++++------- .../models/providers/test_anthropic.py | 24 ++++++------- .../models/providers/test_openai.py | 24 ++++++------- tests/api_resources/test_agents.py | 24 ++++++------- tests/api_resources/test_gpu_droplets.py | 36 +++++++++---------- tests/api_resources/test_knowledge_bases.py | 24 ++++++------- 25 files changed, 276 insertions(+), 276 deletions(-) diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py index b6b461e6..5028698c 100644 --- a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py +++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py @@ -65,7 +65,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -73,7 +73,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -85,7 +85,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -197,7 +197,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: key = client.agents.evaluation_metrics.anthropic.keys.delete( - 
"api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -205,7 +205,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -217,7 +217,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -334,7 +334,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KeyRetrieveResponse, key, path=["response"]) @@ -342,7 +342,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -354,7 +354,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -466,7 +466,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: key = await async_client.agents.evaluation_metrics.anthropic.keys.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KeyDeleteResponse, key, path=["response"]) @@ -474,7 +474,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -486,7 +486,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py index 
da5cf8e1..7da165c2 100644
--- a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
+++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
@@ -65,7 +65,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         key = client.agents.evaluation_metrics.openai.keys.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])

@@ -73,7 +73,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -85,7 +85,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -197,7 +197,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         key = client.agents.evaluation_metrics.openai.keys.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KeyDeleteResponse, key, path=["response"])

@@ -205,7 +205,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -217,7 +217,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -334,7 +334,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         key = await async_client.agents.evaluation_metrics.openai.keys.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KeyRetrieveResponse, key, path=["response"])

@@ -342,7 +342,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -354,7 +354,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -466,7 +466,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         key = await async_client.agents.evaluation_metrics.openai.keys.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(KeyDeleteResponse, key, path=["response"])

@@ -474,7 +474,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -486,7 +486,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
index 3acede09..4f85212d 100644
--- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -66,7 +66,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

@@ -74,7 +74,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -86,7 +86,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,7 +189,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

@@ -197,7 +197,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -209,7 +209,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -231,7 +231,7 @@ def test_path_params_delete(self, client: Gradient) -> None:
     @parametrize
     def test_method_list_evaluation_test_cases(self, client: Gradient) -> None:
         workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

@@ -239,7 +239,7 @@ def test_method_list_evaluation_test_cases(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None:
         response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -251,7 +251,7 @@ def test_raw_response_list_evaluation_test_cases(self, client: Gradient) -> None
     @parametrize
     def test_streaming_response_list_evaluation_test_cases(self, client: Gradient) -> None:
         with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -317,7 +317,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])

@@ -325,7 +325,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -337,7 +337,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -440,7 +440,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])

@@ -448,7 +448,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -460,7 +460,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -482,7 +482,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])

@@ -490,7 +490,7 @@ async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradie
     @parametrize
     async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -502,7 +502,7 @@ async def test_raw_response_list_evaluation_test_cases(self, async_client: Async
     @parametrize
     async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
-            "workspace_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index 8d7e1826..c6acaf82 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         evaluation_run = client.agents.evaluation_runs.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

@@ -72,7 +72,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.agents.evaluation_runs.with_raw_response.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.agents.evaluation_runs.with_streaming_response.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -248,7 +248,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         evaluation_run = await async_client.agents.evaluation_runs.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])

@@ -256,7 +256,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.evaluation_runs.with_raw_response.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -268,7 +268,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.evaluation_runs.with_streaming_response.retrieve(
-            "evaluation_run_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index 60dae7d0..2cf09753 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -21,7 +21,7 @@ class TestKnowledgeBases:
     @parametrize
     def test_method_attach(self, client: Gradient) -> None:
         knowledge_base = client.agents.knowledge_bases.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])

@@ -29,7 +29,7 @@ def test_method_attach(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_attach(self, client: Gradient) -> None:
         response = client.agents.knowledge_bases.with_raw_response.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -41,7 +41,7 @@ def test_raw_response_attach(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_attach(self, client: Gradient) -> None:
         with client.agents.knowledge_bases.with_streaming_response.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -173,7 +173,7 @@ class TestAsyncKnowledgeBases:
     @parametrize
     async def test_method_attach(self, async_client: AsyncGradient) -> None:
         knowledge_base = await async_client.agents.knowledge_bases.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"])

@@ -181,7 +181,7 @@ async def test_method_attach(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_attach(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.knowledge_bases.with_raw_response.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -193,7 +193,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_attach(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.knowledge_bases.with_streaming_response.attach(
-            "agent_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py
index 37bc4eac..3444dcc7 100644
--- a/tests/api_resources/agents/test_routes.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -213,7 +213,7 @@ def test_path_params_add(self, client: Gradient) -> None:
     @parametrize
     def test_method_view(self, client: Gradient) -> None:
         route = client.agents.routes.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(RouteViewResponse, route, path=["response"])

@@ -221,7 +221,7 @@ def test_method_view(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_view(self, client: Gradient) -> None:
         response = client.agents.routes.with_raw_response.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -233,7 +233,7 @@ def test_raw_response_view(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_view(self, client: Gradient) -> None:
         with client.agents.routes.with_streaming_response.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -448,7 +448,7 @@ async def test_path_params_add(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_view(self, async_client: AsyncGradient) -> None:
         route = await async_client.agents.routes.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(RouteViewResponse, route, path=["response"])

@@ -456,7 +456,7 @@ async def test_method_view(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_view(self, async_client: AsyncGradient) -> None:
         response = await async_client.agents.routes.with_raw_response.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is True
@@ -468,7 +468,7 @@ async def test_raw_response_view(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_view(self, async_client: AsyncGradient) -> None:
         async with async_client.agents.routes.with_streaming_response.view(
-            "uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py
index b1d21f62..ebd60c4c 100644
--- a/tests/api_resources/databases/schema_registry/test_config.py
+++ b/tests/api_resources/databases/schema_registry/test_config.py
@@ -26,7 +26,7 @@ class TestConfig:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         config = client.databases.schema_registry.config.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         )
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

@@ -34,7 +34,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.databases.schema_registry.config.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         )

         assert response.is_closed is True
@@ -46,7 +46,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.databases.schema_registry.config.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -229,7 +229,7 @@ class TestAsyncConfig:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         config = await async_client.databases.schema_registry.config.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         )
         assert_matches_type(ConfigRetrieveResponse, config, path=["response"])

@@ -237,7 +237,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.databases.schema_registry.config.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         )

         assert response.is_closed is True
@@ -249,7 +249,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.databases.schema_registry.config.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
index 0d678103..7f7ab06a 100644
--- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
+++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py
@@ -167,7 +167,7 @@ def test_path_params_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.floating_ips.actions.list(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert_matches_type(ActionListResponse, action, path=["response"])

@@ -175,7 +175,7 @@ def test_method_list(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.actions.with_raw_response.list(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -187,7 +187,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -357,7 +357,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.floating_ips.actions.list(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert_matches_type(ActionListResponse, action, path=["response"])

@@ -365,7 +365,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -377,7 +377,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py
index ad8b9585..ad5d4892 100644
--- a/tests/api_resources/gpu_droplets/images/test_actions.py
+++ b/tests/api_resources/gpu_droplets/images/test_actions.py
@@ -136,7 +136,7 @@ def test_streaming_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         action = client.gpu_droplets.images.actions.list(
-            0,
+            62137902,
         )
         assert_matches_type(ActionListResponse, action, path=["response"])

@@ -144,7 +144,7 @@ def test_method_list(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.actions.with_raw_response.list(
-            0,
+            62137902,
         )

         assert response.is_closed is True
@@ -156,7 +156,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.images.actions.with_streaming_response.list(
-            0,
+            62137902,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -290,7 +290,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) ->
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         action = await async_client.gpu_droplets.images.actions.list(
-            0,
+            62137902,
         )
         assert_matches_type(ActionListResponse, action, path=["response"])

@@ -298,7 +298,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.actions.with_raw_response.list(
-            0,
+            62137902,
         )

         assert response.is_closed is True
@@ -310,7 +310,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.actions.with_streaming_response.list(
-            0,
+            62137902,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py
index cbf67b19..bbb0c2e4 100644
--- a/tests/api_resources/gpu_droplets/test_autoscale.py
+++ b/tests/api_resources/gpu_droplets/test_autoscale.py
@@ -120,7 +120,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         autoscale = client.gpu_droplets.autoscale.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )
         assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])

@@ -128,7 +128,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.autoscale.with_raw_response.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )

         assert response.is_closed is True
@@ -140,7 +140,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.autoscale.with_streaming_response.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -297,7 +297,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         autoscale = client.gpu_droplets.autoscale.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )
         assert autoscale is None

@@ -305,7 +305,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.autoscale.with_raw_response.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )

         assert response.is_closed is True
@@ -317,7 +317,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.autoscale.with_streaming_response.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -587,7 +587,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         autoscale = await async_client.gpu_droplets.autoscale.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )
         assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"])

@@ -595,7 +595,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )

         assert response.is_closed is True
@@ -607,7 +607,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -764,7 +764,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         autoscale = await async_client.gpu_droplets.autoscale.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )
         assert autoscale is None

@@ -772,7 +772,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.autoscale.with_raw_response.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         )

         assert response.is_closed is True
@@ -784,7 +784,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.autoscale.with_streaming_response.delete(
-            "autoscale_pool_id",
+            "0d3db13e-a604-4944-9827-7ec2642d32ac",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py
index 4a0d36b9..c6e854e4 100644
--- a/tests/api_resources/gpu_droplets/test_backups.py
+++ b/tests/api_resources/gpu_droplets/test_backups.py
@@ -135,7 +135,7 @@ def test_streaming_response_list_supported_policies(self, client: Gradient) -> N
     @parametrize
     def test_method_retrieve_policy(self, client: Gradient) -> None:
         backup = client.gpu_droplets.backups.retrieve_policy(
-            1,
+            3164444,
         )
         assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])

@@ -143,7 +143,7 @@ def test_method_retrieve_policy(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve_policy(self, client: Gradient) -> None:
         response = client.gpu_droplets.backups.with_raw_response.retrieve_policy(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -155,7 +155,7 @@ def test_raw_response_retrieve_policy(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve_policy(self, client: Gradient) -> None:
         with client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -284,7 +284,7 @@ async def test_streaming_response_list_supported_policies(self, async_client: As
     @parametrize
     async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None:
         backup = await async_client.gpu_droplets.backups.retrieve_policy(
-            1,
+            3164444,
         )
         assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"])

@@ -292,7 +292,7 @@ async def test_method_retrieve_policy(self, async_client: AsyncGradient) -> None
     @parametrize
     async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -304,7 +304,7 @@ async def test_raw_response_retrieve_policy(self, async_client: AsyncGradient) -
     @parametrize
     async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
index 166206d2..80f1bd7c 100644
--- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
+++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py
@@ -24,7 +24,7 @@ class TestDestroyWithAssociatedResources:
     @parametrize
     def test_method_list(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list(
-            1,
+            3164444,
         )
         assert_matches_type(
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
@@ -34,7 +34,7 @@ def test_method_list(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_list(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -48,7 +48,7 @@ def test_raw_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_list(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -64,7 +64,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_check_status(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status(
-            1,
+            3164444,
         )
         assert_matches_type(
             DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"]
@@ -74,7 +74,7 @@ def test_method_check_status(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_check_status(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -88,7 +88,7 @@ def test_raw_response_check_status(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_check_status(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -188,7 +188,7 @@ def test_streaming_response_delete_selective(self, client: Gradient) -> None:
     @parametrize
     def test_method_retry(self, client: Gradient) -> None:
         destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry(
-            1,
+            3164444,
         )
         assert destroy_with_associated_resource is None

@@ -196,7 +196,7 @@ def test_method_retry(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retry(self, client: Gradient) -> None:
         response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -208,7 +208,7 @@ def test_raw_response_retry(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retry(self, client: Gradient) -> None:
         with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -228,7 +228,7 @@ class TestAsyncDestroyWithAssociatedResources:
     @parametrize
     async def test_method_list(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list(
-            1,
+            3164444,
         )
         assert_matches_type(
             DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"]
@@ -238,7 +238,7 @@ async def test_method_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -252,7 +252,7 @@ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -269,7 +269,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     async def test_method_check_status(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = (
             await async_client.gpu_droplets.destroy_with_associated_resources.check_status(
-                1,
+                3164444,
             )
         )
         assert_matches_type(
@@ -280,7 +280,7 @@ async def test_method_check_status(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_check_status(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -294,7 +294,7 @@ async def test_raw_response_check_status(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_streaming_response_check_status(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -400,7 +400,7 @@ async def test_streaming_response_delete_selective(self, async_client: AsyncGrad
     @parametrize
     async def test_method_retry(self, async_client: AsyncGradient) -> None:
         destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.retry(
-            1,
+            3164444,
         )
         assert destroy_with_associated_resource is None

@@ -408,7 +408,7 @@ async def test_method_retry(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retry(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry(
-            1,
+            3164444,
         )

         assert response.is_closed is True
@@ -420,7 +420,7 @@ async def test_raw_response_retry(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retry(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry(
-            1,
+            3164444,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py
index 83142b93..3d8469b3 100644
--- a/tests/api_resources/gpu_droplets/test_firewalls.py
+++ b/tests/api_resources/gpu_droplets/test_firewalls.py
@@ -103,7 +103,7 @@ def test_streaming_response_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])

@@ -111,7 +111,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )

         assert response.is_closed is True
@@ -123,7 +123,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -278,7 +278,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         firewall = client.gpu_droplets.firewalls.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )
         assert firewall is None

@@ -286,7 +286,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.firewalls.with_raw_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )

         assert response.is_closed is True
@@ -298,7 +298,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.firewalls.with_streaming_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -403,7 +403,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )
         assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"])

@@ -411,7 +411,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )

         assert response.is_closed is True
@@ -423,7 +423,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -578,7 +578,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         firewall = await async_client.gpu_droplets.firewalls.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )
         assert firewall is None

@@ -586,7 +586,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.firewalls.with_raw_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         )

         assert response.is_closed is True
@@ -598,7 +598,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.firewalls.with_streaming_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "bb4b2611-3d72-467b-8602-280330ecd65c",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py
index c252a24a..3119bf28 100644
--- a/tests/api_resources/gpu_droplets/test_floating_ips.py
+++ b/tests/api_resources/gpu_droplets/test_floating_ips.py
@@ -102,7 +102,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])

@@ -110,7 +110,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -122,7 +122,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -181,7 +181,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         floating_ip = client.gpu_droplets.floating_ips.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert floating_ip is None

@@ -189,7 +189,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.floating_ips.with_raw_response.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -201,7 +201,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.floating_ips.with_streaming_response.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -306,7 +306,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"])

@@ -314,7 +314,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -326,7 +326,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -385,7 +385,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         floating_ip = await async_client.gpu_droplets.floating_ips.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         )
         assert floating_ip is None

@@ -393,7 +393,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         )

         assert response.is_closed is True
@@ -405,7 +405,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete(
-            "192.168.1.1",
+            "45.55.96.47",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py
index 8f81912d..4c4146e2 100644
--- a/tests/api_resources/gpu_droplets/test_images.py
+++ b/tests/api_resources/gpu_droplets/test_images.py
@@ -186,7 +186,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         image = client.gpu_droplets.images.delete(
-            0,
+            62137902,
         )
         assert image is None

@@ -194,7 +194,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.images.with_raw_response.delete(
-            0,
+            62137902,
         )

         assert response.is_closed is True
@@ -206,7 +206,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.images.with_streaming_response.delete(
-            0,
+            62137902,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -386,7 +386,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         image = await async_client.gpu_droplets.images.delete(
-            0,
+            62137902,
         )
         assert image is None

@@ -394,7 +394,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.images.with_raw_response.delete(
-            0,
+            62137902,
         )

         assert response.is_closed is True
@@ -406,7 +406,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.images.with_streaming_response.delete(
-            0,
+            62137902,
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py
index 5db3c20b..363520e4 100644
--- a/tests/api_resources/gpu_droplets/test_load_balancers.py
+++ b/tests/api_resources/gpu_droplets/test_load_balancers.py
@@ -278,7 +278,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])

@@ -286,7 +286,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -298,7 +298,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -649,7 +649,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert load_balancer is None

@@ -657,7 +657,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -669,7 +669,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -691,7 +691,7 @@ def test_path_params_delete(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete_cache(self, client: Gradient) -> None:
         load_balancer = client.gpu_droplets.load_balancers.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert load_balancer is None

@@ -699,7 +699,7 @@ def test_method_delete_cache(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete_cache(self, client: Gradient) -> None:
         response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -711,7 +711,7 @@ def test_raw_response_delete_cache(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete_cache(self, client: Gradient) -> None:
         with client.gpu_droplets.load_balancers.with_streaming_response.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -991,7 +991,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         load_balancer = await async_client.gpu_droplets.load_balancers.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"])

@@ -999,7 +999,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -1011,7 +1011,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1362,7 +1362,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         load_balancer = await async_client.gpu_droplets.load_balancers.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert load_balancer is None

@@ -1370,7 +1370,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -1382,7 +1382,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1404,7 +1404,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_delete_cache(self, async_client: AsyncGradient) -> None:
         load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )
         assert load_balancer is None

@@ -1412,7 +1412,7 @@ async def test_method_delete_cache(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         )

         assert response.is_closed is True
@@ -1424,7 +1424,7 @@ async def test_raw_response_delete_cache(self, async_client: AsyncGradient) -> N
     @parametrize
     async def test_streaming_response_delete_cache(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache(
-            "lb_id",
+            "4de7ac8b-495b-4884-9a69-1050c6793cd6",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py
index f9b3778c..8243625d 100644
--- a/tests/api_resources/gpu_droplets/test_volumes.py
+++ b/tests/api_resources/gpu_droplets/test_volumes.py
@@ -135,7 +135,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )
         assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])

@@ -143,7 +143,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )

         assert response.is_closed is True
@@ -155,7 +155,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -216,7 +216,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         volume = client.gpu_droplets.volumes.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )
         assert volume is None

@@ -224,7 +224,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.with_raw_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )

         assert response.is_closed is True
@@ -236,7 +236,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.with_streaming_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -411,7 +411,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )
         assert_matches_type(VolumeRetrieveResponse, volume, path=["response"])

@@ -419,7 +419,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )

         assert response.is_closed is True
@@ -431,7 +431,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -492,7 +492,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         volume = await async_client.gpu_droplets.volumes.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )
         assert volume is None

@@ -500,7 +500,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.with_raw_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         )

         assert response.is_closed is True
@@ -512,7 +512,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.with_streaming_response.delete(
-            "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            "7724db7c-e098-11e5-b522-000f53304e51",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
index e3450001..ec157513 100644
--- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
+++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py
@@ -81,7 +81,7 @@ def test_path_params_create(self, client: Gradient) -> None:
     @parametrize
     def test_method_retrieve(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )
         assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])

@@ -89,7 +89,7 @@ def test_method_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_retrieve(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )

         assert response.is_closed is True
@@ -101,7 +101,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -175,7 +175,7 @@ def test_path_params_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         snapshot = client.gpu_droplets.volumes.snapshots.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )
         assert snapshot is None

@@ -183,7 +183,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )

         assert response.is_closed is True
@@ -195,7 +195,7 @@ def test_raw_response_delete(self, client: Gradient) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: Gradient) -> None:
         with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -279,7 +279,7 @@ async def test_path_params_create(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )
         assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])

@@ -287,7 +287,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )

         assert response.is_closed is True
@@ -299,7 +299,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -373,7 +373,7 @@ async def test_path_params_list(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_method_delete(self, async_client: AsyncGradient) -> None:
         snapshot = await async_client.gpu_droplets.volumes.snapshots.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )
         assert snapshot is None

@@ -381,7 +381,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
         response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         )

         assert response.is_closed is True
@@ -393,7 +393,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
         async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete(
-            "snapshot_id",
+            "fbe805e8-866b-11e6-96bf-000f53315a41",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
index d9745710..0bbfa00f 100644
--- a/tests/api_resources/inference/test_api_keys.py
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -152,7 +152,7 @@ def test_streaming_response_list(self, client: Gradient) -> None:
     @parametrize
     def test_method_delete(self, client: Gradient) -> None:
         api_key = client.inference.api_keys.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )
         assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"])

@@ -160,7 +160,7 @@ def test_method_delete(self, client: Gradient) -> None:
     @parametrize
     def test_raw_response_delete(self, client: Gradient) -> None:
         response = client.inference.api_keys.with_raw_response.delete(
-            "api_key_uuid",
+            '"123e4567-e89b-12d3-a456-426614174000"',
         )

         assert response.is_closed is
True @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -194,7 +194,7 @@ def test_path_params_delete(self, client: Gradient) -> None: @parametrize def test_method_update_regenerate(self, client: Gradient) -> None: api_key = client.inference.api_keys.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -202,7 +202,7 @@ def test_method_update_regenerate(self, client: Gradient) -> None: @parametrize def test_raw_response_update_regenerate(self, client: Gradient) -> None: response = client.inference.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -214,7 +214,7 @@ def test_raw_response_update_regenerate(self, client: Gradient) -> None: @parametrize def test_streaming_response_update_regenerate(self, client: Gradient) -> None: with client.inference.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -367,7 +367,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -375,7 +375,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -387,7 +387,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -409,7 +409,7 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_method_update_regenerate(self, async_client: AsyncGradient) -> None: api_key = await async_client.inference.api_keys.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -417,7 +417,7 @@ async def test_method_update_regenerate(self, async_client: AsyncGradient) -> No @parametrize async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) -> None: response = await async_client.inference.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -429,7 +429,7 
@@ async def test_raw_response_update_regenerate(self, async_client: AsyncGradient) @parametrize async def test_streaming_response_update_regenerate(self, async_client: AsyncGradient) -> None: async with async_client.inference.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 8840edfe..3dffaa69 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -72,7 +72,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -143,7 +143,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_retrieve_data_sources(self, client: Gradient) -> None: indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -151,7 +151,7 @@ def test_method_retrieve_data_sources(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -163,7 +163,7 @@ def test_raw_response_retrieve_data_sources(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve_data_sources(self, client: Gradient) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -279,7 +279,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -287,7 +287,7 @@ async def test_method_retrieve(self, 
async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -299,7 +299,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -358,7 +358,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -366,7 +366,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradient) - @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -378,7 +378,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( - "indexing_job_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index 60cb0c16..b0aeb37c 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -65,7 +65,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @@ -73,7 +73,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -85,7 +85,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -197,7 +197,7 @@ def 
test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: anthropic = client.models.providers.anthropic.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) @@ -205,7 +205,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.anthropic.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -217,7 +217,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.anthropic.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -334,7 +334,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"]) @@ -342,7 +342,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -354,7 +354,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -466,7 +466,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: anthropic = await async_client.models.providers.anthropic.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"]) @@ -474,7 +474,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.anthropic.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -486,7 +486,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.anthropic.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git 
a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 8f9c1f80..c5780e05 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -65,7 +65,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: openai = client.models.providers.openai.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @@ -73,7 +73,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -85,7 +85,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -197,7 +197,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: openai = client.models.providers.openai.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @@ -205,7 +205,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.models.providers.openai.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -217,7 +217,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.models.providers.openai.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -334,7 +334,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"]) @@ -342,7 +342,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -354,7 +354,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -466,7 +466,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: openai = await async_client.models.providers.openai.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIDeleteResponse, openai, path=["response"]) @@ -474,7 +474,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.models.providers.openai.with_raw_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -486,7 +486,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.models.providers.openai.with_streaming_response.delete( - "api_key_uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 6d040d18..2069d2d8 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -74,7 +74,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: agent = client.agents.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @@ -82,7 +82,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.agents.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -94,7 +94,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.agents.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -221,7 +221,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: agent = client.agents.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @@ -229,7 +229,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.agents.with_raw_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -241,7 +241,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.agents.with_streaming_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -418,7 +418,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N 
@parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @@ -426,7 +426,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -438,7 +438,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -565,7 +565,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: agent = await async_client.agents.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @@ -573,7 +573,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.agents.with_raw_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -585,7 +585,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.agents.with_streaming_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py index 0cb27fbb..7d50c037 100644 --- a/tests/api_resources/test_gpu_droplets.py +++ b/tests/api_resources/test_gpu_droplets.py @@ -161,7 +161,7 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.retrieve( - 1, + 3164444, ) assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) @@ -169,7 +169,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.retrieve( - 1, + 3164444, ) assert response.is_closed is True @@ -181,7 +181,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.retrieve( - 1, + 3164444, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -235,7 +235,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.delete( - 1, + 3164444, ) assert 
gpu_droplet is None @@ -243,7 +243,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.delete( - 1, + 3164444, ) assert response.is_closed is True @@ -255,7 +255,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.delete( - 1, + 3164444, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -391,7 +391,7 @@ def test_streaming_response_list_kernels(self, client: Gradient) -> None: @parametrize def test_method_list_neighbors(self, client: Gradient) -> None: gpu_droplet = client.gpu_droplets.list_neighbors( - 1, + 3164444, ) assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) @@ -399,7 +399,7 @@ def test_method_list_neighbors(self, client: Gradient) -> None: @parametrize def test_raw_response_list_neighbors(self, client: Gradient) -> None: response = client.gpu_droplets.with_raw_response.list_neighbors( - 1, + 3164444, ) assert response.is_closed is True @@ -411,7 +411,7 @@ def test_raw_response_list_neighbors(self, client: Gradient) -> None: @parametrize def test_streaming_response_list_neighbors(self, client: Gradient) -> None: with client.gpu_droplets.with_streaming_response.list_neighbors( - 1, + 3164444, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -607,7 +607,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncGra @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.retrieve( - 1, + 3164444, ) assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) @@ -615,7 +615,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.retrieve( - 1, + 3164444, ) assert response.is_closed is True @@ -627,7 +627,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.retrieve( - 1, + 3164444, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -681,7 +681,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.delete( - 1, + 3164444, ) assert gpu_droplet is None @@ -689,7 +689,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.delete( - 1, + 3164444, ) assert response.is_closed is True @@ -701,7 +701,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.delete( - 1, + 3164444, 
) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -837,7 +837,7 @@ async def test_streaming_response_list_kernels(self, async_client: AsyncGradient @parametrize async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None: gpu_droplet = await async_client.gpu_droplets.list_neighbors( - 1, + 3164444, ) assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) @@ -845,7 +845,7 @@ async def test_method_list_neighbors(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> None: response = await async_client.gpu_droplets.with_raw_response.list_neighbors( - 1, + 3164444, ) assert response.is_closed is True @@ -857,7 +857,7 @@ async def test_raw_response_list_neighbors(self, async_client: AsyncGradient) -> @parametrize async def test_streaming_response_list_neighbors(self, async_client: AsyncGradient) -> None: async with async_client.gpu_droplets.with_streaming_response.list_neighbors( - 1, + 3164444, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 82698131..62965775 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -102,7 +102,7 @@ def test_streaming_response_create(self, client: Gradient) -> None: @parametrize def test_method_retrieve(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) @@ -110,7 +110,7 @@ def test_method_retrieve(self, client: Gradient) -> None: @parametrize def test_raw_response_retrieve(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -122,7 +122,7 @@ def test_raw_response_retrieve(self, client: Gradient) -> None: @parametrize def test_streaming_response_retrieve(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -237,7 +237,7 @@ def test_streaming_response_list(self, client: Gradient) -> None: @parametrize def test_method_delete(self, client: Gradient) -> None: knowledge_base = client.knowledge_bases.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) @@ -245,7 +245,7 @@ def test_method_delete(self, client: Gradient) -> None: @parametrize def test_raw_response_delete(self, client: Gradient) -> None: response = client.knowledge_bases.with_raw_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -257,7 +257,7 @@ def test_raw_response_delete(self, client: Gradient) -> None: @parametrize def test_streaming_response_delete(self, client: Gradient) -> None: with client.knowledge_bases.with_streaming_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" @@ -360,7 +360,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N @parametrize async def test_method_retrieve(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) @@ -368,7 +368,7 @@ async def test_method_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -380,7 +380,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.retrieve( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -495,7 +495,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradient) -> Non @parametrize async def test_method_delete(self, async_client: AsyncGradient) -> None: knowledge_base = await async_client.knowledge_bases.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) @@ -503,7 +503,7 @@ async def test_method_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: response = await async_client.knowledge_bases.with_raw_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -515,7 +515,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: async with async_client.knowledge_bases.with_streaming_response.delete( - "uuid", + '"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" From 9b878e68f1f5e012122fd36630ceb71c2f5345ea Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 01:24:57 +0000 Subject: [PATCH 175/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4191c889..56441f9d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0" + ".": "3.0.1" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index eed06559..7df55327 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0" +version = "3.0.1" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 17f05c22..12d04477 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.0.0" # x-release-please-version +__version__ = "3.0.1" # x-release-please-version From 3e0ec20418fb9b5b7e01ea727db6e235d3e11840 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 15:42:52 +0000 Subject: [PATCH 176/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 56441f9d..a1304a17 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.1" + ".": "3.0.2" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 7df55327..bb13523b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.1" +version = "3.0.2" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 12d04477..bd32dfe9 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.0.1" # x-release-please-version +__version__ = "3.0.2" # x-release-please-version From 8b674bc6d2858dd1193a8ae6f0ee5a605669ab1b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 15:07:55 +0000 Subject: [PATCH 177/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a1304a17..e0dc5001 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.2" + ".": "3.1.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index bb13523b..cae5ec3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.2" +version = "3.1.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index bd32dfe9..69cb2fcb 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.0.2" # x-release-please-version +__version__ = "3.1.0" # x-release-please-version From 6327cdfbeea5fd8330b95b0f28a7dc6d98c29389 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 21:18:44 +0000 Subject: [PATCH 178/200] feat(api): Images generations --- .stats.yml | 8 +- api.md | 20 + src/gradient/_client.py | 39 +- src/gradient/resources/__init__.py | 14 + src/gradient/resources/images/__init__.py | 33 + src/gradient/resources/images/generations.py | 686 ++++++++++++++++++ src/gradient/resources/images/images.py | 102 +++ .../resources/knowledge_bases/data_sources.py | 94 +++ src/gradient/types/__init__.py | 3 + .../agents/chat/completion_create_params.py | 68 +- .../agents/chat/completion_create_response.py | 3 + .../types/chat/completion_create_params.py | 68 +- .../types/chat/completion_create_response.py | 3 + .../types/gpu_droplets/account/__init__.py | 1 + .../account/key_create_response.py | 33 +- .../gpu_droplets/account/key_list_response.py | 33 +- .../account/key_retrieve_response.py | 33 +- .../account/key_update_response.py | 33 +- .../types/gpu_droplets/account/ssh_keys.py | 35 + src/gradient/types/images/__init__.py | 6 + .../types/images/generation_create_params.py | 100 +++ .../images/generation_create_response.py | 63 ++ .../types/knowledge_bases/__init__.py | 6 + ...ata_source_create_presigned_urls_params.py | 21 + ...a_source_create_presigned_urls_response.py | 30 + src/gradient/types/shared/__init__.py | 3 + .../types/shared/chat_completion_chunk.py | 3 + .../types/shared/image_gen_completed_event.py | 55 ++ .../shared/image_gen_partial_image_event.py | 33 + .../types/shared/image_gen_stream_event.py | 14 + src/gradient/types/shared/size.py | 2 +- tests/api_resources/images/__init__.py | 1 + .../api_resources/images/test_generations.py | 240 ++++++ .../knowledge_bases/test_data_sources.py | 85 +++ 34 files changed, 1837 insertions(+), 134 deletions(-) create mode 100644 src/gradient/resources/images/__init__.py create mode 100644 src/gradient/resources/images/generations.py create mode 100644 src/gradient/resources/images/images.py create mode 100644 src/gradient/types/gpu_droplets/account/ssh_keys.py create mode 100644 src/gradient/types/images/__init__.py create mode 100644 src/gradient/types/images/generation_create_params.py create mode 100644 src/gradient/types/images/generation_create_response.py create mode 100644 src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py create mode 100644 src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py create mode 100644 src/gradient/types/shared/image_gen_completed_event.py create mode 100644 src/gradient/types/shared/image_gen_partial_image_event.py create mode 100644 src/gradient/types/shared/image_gen_stream_event.py create mode 100644 tests/api_resources/images/__init__.py create mode 100644 tests/api_resources/images/test_generations.py diff --git a/.stats.yml b/.stats.yml index e30c19b7..13507cd5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 173 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml -openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 3d425c415b7f7ab581418b43eb521cb3 +configured_endpoints: 175 +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml +openapi_spec_hash: 83a3d092965fde776b29b61f785459f9 +config_hash: dd3a0f16fb9e072bb63c570b14beccd2 diff --git a/api.md b/api.md index 7299b3c6..6dd6c18e 100644 --- a/api.md +++ b/api.md @@ -18,6 +18,9 @@ from gradient.types import ( GarbageCollection, GPUInfo, Image, + ImageGenCompletedEvent, + ImageGenPartialImageEvent, + ImageGenStreamEvent, Kernel, MetaProperties, NetworkV4, @@ -383,6 +386,20 @@ Methods: - client.chat.completions.create(\*\*params) -> CompletionCreateResponse +# Images + +## Generations + +Types: + +```python +from gradient.types.images import GenerationCreateResponse +``` + +Methods: + +- client.images.generations.create(\*\*params) -> GenerationCreateResponse + # GPUDroplets Types: @@ -757,6 +774,7 @@ Types: ```python from gradient.types.gpu_droplets.account import ( + SSHKeys, KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -834,6 +852,7 @@ from gradient.types.knowledge_bases import ( DataSourceCreateResponse, DataSourceListResponse, DataSourceDeleteResponse, + DataSourceCreatePresignedURLsResponse, ) ``` @@ -842,6 +861,7 @@ Methods: - client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create_presigned_urls(\*\*params) -> DataSourceCreatePresignedURLsResponse ## IndexingJobs diff --git a/src/gradient/_client.py b/src/gradient/_client.py index a215f629..80e8ff63 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -32,10 +32,11 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, databases, inference, gpu_droplets, knowledge_bases + from .resources import chat, agents, images, models, regions, databases, inference, gpu_droplets, knowledge_bases from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.images.images import ImagesResource, AsyncImagesResource from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource @@ -149,6 +150,12 @@ def chat(self) -> ChatResource: return ChatResource(self) + @cached_property + def images(self) -> ImagesResource: + from .resources.images import ImagesResource + + return ImagesResource(self) + @cached_property def gpu_droplets(self) -> GPUDropletsResource: from .resources.gpu_droplets import GPUDropletsResource @@ -416,6 +423,12 @@ def chat(self) -> AsyncChatResource: return AsyncChatResource(self) + @cached_property + def images(self) -> AsyncImagesResource: + from .resources.images import AsyncImagesResource + + return AsyncImagesResource(self) + @cached_property def gpu_droplets(self) -> AsyncGPUDropletsResource: from .resources.gpu_droplets import AsyncGPUDropletsResource @@ -606,6 +619,12 @@ def chat(self) -> chat.ChatResourceWithRawResponse: return ChatResourceWithRawResponse(self._client.chat) + @cached_property + def images(self) -> 
images.ImagesResourceWithRawResponse: + from .resources.images import ImagesResourceWithRawResponse + + return ImagesResourceWithRawResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse @@ -661,6 +680,12 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse: return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property + def images(self) -> images.AsyncImagesResourceWithRawResponse: + from .resources.images import AsyncImagesResourceWithRawResponse + + return AsyncImagesResourceWithRawResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse @@ -716,6 +741,12 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse: return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def images(self) -> images.ImagesResourceWithStreamingResponse: + from .resources.images import ImagesResourceWithStreamingResponse + + return ImagesResourceWithStreamingResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse @@ -771,6 +802,12 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def images(self) -> images.AsyncImagesResourceWithStreamingResponse: + from .resources.images import AsyncImagesResourceWithStreamingResponse + + return AsyncImagesResourceWithStreamingResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py index d5198560..fdc7d346 100644 --- a/src/gradient/resources/__init__.py +++ b/src/gradient/resources/__init__.py @@ -16,6 +16,14 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) from .models import ( ModelsResource, AsyncModelsResource, @@ -78,6 +86,12 @@ "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", "GPUDropletsResource", "AsyncGPUDropletsResource", "GPUDropletsResourceWithRawResponse", diff --git a/src/gradient/resources/images/__init__.py b/src/gradient/resources/images/__init__.py new file mode 100644 index 00000000..cf187f1d --- /dev/null +++ b/src/gradient/resources/images/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .generations import ( + GenerationsResource, + AsyncGenerationsResource, + GenerationsResourceWithRawResponse, + AsyncGenerationsResourceWithRawResponse, + GenerationsResourceWithStreamingResponse, + AsyncGenerationsResourceWithStreamingResponse, +) + +__all__ = [ + "GenerationsResource", + "AsyncGenerationsResource", + "GenerationsResourceWithRawResponse", + "AsyncGenerationsResourceWithRawResponse", + "GenerationsResourceWithStreamingResponse", + "AsyncGenerationsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", +] diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py new file mode 100644 index 00000000..592c1bde --- /dev/null +++ b/src/gradient/resources/images/generations.py @@ -0,0 +1,686 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, overload + +import httpx + +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._streaming import Stream, AsyncStream +from ..._base_client import make_request_options +from ...types.images import generation_create_params +from ...types.shared.image_gen_stream_event import ImageGenStreamEvent +from ...types.images.generation_create_response import GenerationCreateResponse + +__all__ = ["GenerationsResource", "AsyncGenerationsResource"] + + +class GenerationsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> GenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return GenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return GenerationsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
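A quick usage sketch for the non-streaming overload above. Everything outside the `create()` call is an assumption for illustration: the client is presumed to resolve credentials from the environment, and the `data[0].b64_json` access presumes the response carries base64 image payloads (the actual shape lives in `generation_create_response.py`, which this patch adds but does not show here).

import base64

from gradient import Gradient

client = Gradient()  # assumed: credentials resolved from the environment

result = client.images.generations.create(
    prompt="A watercolor lighthouse at dawn",
    size="1024x1536",  # portrait, one of the sizes named in the docstring
    quality="auto",
)

# Assumed response shape: a list of generated images with base64 payloads.
image_bytes = base64.b64decode(result.data[0].b64_json)
with open("lighthouse.png", "wb") as file:
    file.write(image_bytes)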
+ + @overload + def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
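The same call with `stream=True` resolves to this overload and yields a `Stream[ImageGenStreamEvent]`. A sketch under the same assumed client names, with the event types taken from the shared models added later in this patch:

    import base64

    from gradient import Gradient  # assumed top-level export of the client

    client = Gradient()

    # Per the docstring above, partial_images must be greater than 0 when
    # streaming; each partial chunk arrives as a server-sent event.
    events = client.images.generations.create(
        prompt="A watercolor lighthouse at dawn",
        stream=True,
        partial_images=2,
    )
    for event in events:
        if event.type == "image_generation.partial_image":
            print(f"received partial image #{event.partial_image_index}")
        elif event.type == "image_generation.completed":
            with open("lighthouse.png", "wb") as f:
                f.write(base64.b64decode(event.b64_json))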
+ + @overload + def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
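This third overload takes a plain `bool` so that a `stream` flag known only at runtime still type-checks; the result is then the union `GenerationCreateResponse | Stream[ImageGenStreamEvent]` and has to be narrowed before use. A sketch, again assuming the `Gradient` client name (`Stream` is imported from the same internal module this resource uses above):

    import base64

    from gradient import Gradient  # assumed top-level export of the client
    from gradient._streaming import Stream  # internal path, per the imports above

    client = Gradient()

    def generate(prompt: str, live_preview: bool):
        result = client.images.generations.create(
            prompt=prompt,
            stream=live_preview,
            # partial_images only matters when streaming; 0 is the default.
            partial_images=2 if live_preview else 0,
        )
        if isinstance(result, Stream):  # narrows the union return type
            image = None
            for event in result:
                if event.type == "image_generation.completed":
                    image = base64.b64decode(event.b64_json)
            return image
        return base64.b64decode(result.data[0].b64_json)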
+ + @required_args(["prompt"], ["prompt", "stream"]) + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + return self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=Stream[ImageGenStreamEvent], + ) + + +class AsyncGenerationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncGenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncGenerationsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + return await self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=await async_maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageGenStreamEvent], + ) + + +class GenerationsResourceWithRawResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_raw_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithRawResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_raw_response_wrapper( + generations.create, + ) + + +class GenerationsResourceWithStreamingResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_streamed_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithStreamingResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_streamed_response_wrapper( + generations.create, + ) diff --git a/src/gradient/resources/images/images.py b/src/gradient/resources/images/images.py new file mode 100644 index 00000000..37e7290f --- /dev/null +++ b/src/gradient/resources/images/images.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .generations import ( + GenerationsResource, + AsyncGenerationsResource, + GenerationsResourceWithRawResponse, + AsyncGenerationsResourceWithRawResponse, + GenerationsResourceWithStreamingResponse, + AsyncGenerationsResourceWithStreamingResponse, +) + +__all__ = ["ImagesResource", "AsyncImagesResource"] + + +class ImagesResource(SyncAPIResource): + @cached_property + def generations(self) -> GenerationsResource: + return GenerationsResource(self._client) + + @cached_property + def with_raw_response(self) -> ImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return ImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return ImagesResourceWithStreamingResponse(self) + + +class AsyncImagesResource(AsyncAPIResource): + @cached_property + def generations(self) -> AsyncGenerationsResource: + return AsyncGenerationsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncImagesResourceWithStreamingResponse(self) + + +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> GenerationsResourceWithRawResponse: + return GenerationsResourceWithRawResponse(self._images.generations) + + +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> AsyncGenerationsResourceWithRawResponse: + return AsyncGenerationsResourceWithRawResponse(self._images.generations) + + +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> GenerationsResourceWithStreamingResponse: + return GenerationsResourceWithStreamingResponse(self._images.generations) + + +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> AsyncGenerationsResourceWithStreamingResponse: + return AsyncGenerationsResourceWithStreamingResponse(self._images.generations) diff --git a/src/gradient/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py index 083ea45f..a00d93f5 100644 --- a/src/gradient/resources/knowledge_bases/data_sources.py +++ b/src/gradient/resources/knowledge_bases/data_sources.py @@ -2,6 +2,8 @@ from __future__ import annotations +from typing import Iterable + import httpx from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given @@ -18,6 +20,7 @@ from ...types.knowledge_bases import ( data_source_list_params, data_source_create_params, + data_source_create_presigned_urls_params, ) from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse @@ -25,6 +28,7 @@ from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam +from ...types.knowledge_bases.data_source_create_presigned_urls_response import DataSourceCreatePresignedURLsResponse __all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] @@ -203,6 +207,45 @@ def delete( 
cast_to=DataSourceDeleteResponse, ) + def create_presigned_urls( + self, + *, + files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> DataSourceCreatePresignedURLsResponse: + """ + To create presigned URLs for knowledge base data source file upload, send a POST + request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls", + body=maybe_transform( + {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreatePresignedURLsResponse, + ) + class AsyncDataSourcesResource(AsyncAPIResource): @cached_property @@ -378,6 +421,45 @@ async def delete( cast_to=DataSourceDeleteResponse, ) + async def create_presigned_urls( + self, + *, + files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> DataSourceCreatePresignedURLsResponse: + """ + To create presigned URLs for knowledge base data source file upload, send a POST + request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls", + body=await async_maybe_transform( + {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreatePresignedURLsResponse, + ) + class DataSourcesResourceWithRawResponse: def __init__(self, data_sources: DataSourcesResource) -> None: @@ -392,6 +474,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.delete = to_raw_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = to_raw_response_wrapper( + data_sources.create_presigned_urls, + ) class AsyncDataSourcesResourceWithRawResponse: @@ -407,6 +492,9 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.delete = async_to_raw_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = async_to_raw_response_wrapper( + data_sources.create_presigned_urls, + ) class DataSourcesResourceWithStreamingResponse: @@ -422,6 +510,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.delete = to_streamed_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = to_streamed_response_wrapper( + data_sources.create_presigned_urls, + ) class AsyncDataSourcesResourceWithStreamingResponse: @@ -437,3 +528,6 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.delete = async_to_streamed_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = async_to_streamed_response_wrapper( + data_sources.create_presigned_urls, + ) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index 42faaff6..939e26f7 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -39,8 +39,11 @@ GarbageCollection as GarbageCollection, FirewallRuleTarget as FirewallRuleTarget, ChatCompletionChunk as ChatCompletionChunk, + ImageGenStreamEvent as ImageGenStreamEvent, SubscriptionTierBase as SubscriptionTierBase, + ImageGenCompletedEvent as ImageGenCompletedEvent, DropletNextBackupWindow as DropletNextBackupWindow, + ImageGenPartialImageEvent as ImageGenPartialImageEvent, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, ) from .api_agent import APIAgent as APIAgent diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py index d8cf7bc1..0980132e 100644 --- a/src/gradient/types/agents/chat/completion_create_params.py +++ b/src/gradient/types/agents/chat/completion_create_params.py @@ -11,9 +11,17 @@ "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1", 
"MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessageToolCall", "MessageChatCompletionRequestAssistantMessageToolCallFunction", "MessageChatCompletionRequestToolMessage", @@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ +class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]] """The contents of the system message.""" role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" +class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]] """The contents of the developer message.""" role: Required[Literal["developer"]] """The role of the messages author, in this case `developer`.""" +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): 
arguments: Required[str] """ @@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, SequenceNotStr[str], None] + content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py index 4c839ded..f3dedb4e 100644 --- a/src/gradient/types/agents/chat/completion_create_response.py +++ b/src/gradient/types/agents/chat/completion_create_response.py @@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + reasoning_content: Optional[str] = None + """The reasoning content generated by the model.""" + refusal: Optional[str] = None """The refusal message generated by the model.""" diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py index 17f00242..7874d893 100644 --- a/src/gradient/types/chat/completion_create_params.py +++ b/src/gradient/types/chat/completion_create_params.py @@ -11,9 +11,17 @@ "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessageToolCall", "MessageChatCompletionRequestAssistantMessageToolCallFunction", "MessageChatCompletionRequestToolMessage", @@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ +class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]] """The contents of the system message.""" role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" +class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + 
+MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]] """The contents of the developer message.""" role: Required[Literal["developer"]] """The role of the messages author, in this case `developer`.""" +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): arguments: Required[str] """ @@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, SequenceNotStr[str], None] + content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py index 73a09cf5..9e157aee 100644 --- a/src/gradient/types/chat/completion_create_response.py +++ b/src/gradient/types/chat/completion_create_response.py @@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + reasoning_content: Optional[str] = None + """The reasoning content generated by the model.""" + refusal: Optional[str] = None """The refusal message generated by the model.""" diff --git a/src/gradient/types/gpu_droplets/account/__init__.py b/src/gradient/types/gpu_droplets/account/__init__.py index 4cd64974..2d8a05ae 100644 --- a/src/gradient/types/gpu_droplets/account/__init__.py +++ b/src/gradient/types/gpu_droplets/account/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .ssh_keys import SSHKeys as SSHKeys from .key_list_params import KeyListParams as KeyListParams from .key_create_params import KeyCreateParams as KeyCreateParams from .key_list_response import KeyListResponse as KeyListResponse diff --git 
a/src/gradient/types/gpu_droplets/account/key_create_response.py b/src/gradient/types/gpu_droplets/account/key_create_response.py index 9fe566ed..5ce63269 100644 --- a/src/gradient/types/gpu_droplets/account/key_create_response.py +++ b/src/gradient/types/gpu_droplets/account/key_create_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyCreateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyCreateResponse"] class KeyCreateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/key_list_response.py b/src/gradient/types/gpu_droplets/account/key_list_response.py index be4c721c..1151043e 100644 --- a/src/gradient/types/gpu_droplets/account/key_list_response.py +++ b/src/gradient/types/gpu_droplets/account/key_list_response.py @@ -2,39 +2,12 @@ from typing import List, Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel from ...shared.page_links import PageLinks from ...shared.meta_properties import MetaProperties -__all__ = ["KeyListResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyListResponse"] class KeyListResponse(BaseModel): @@ -43,4 +16,4 @@ class KeyListResponse(BaseModel): links: Optional[PageLinks] = None - ssh_keys: Optional[List[SSHKey]] = None + ssh_keys: Optional[List[SSHKeys]] = None diff --git a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py index 7cd3215e..da6e94d1 100644 --- a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py +++ b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyRetrieveResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. 
- - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyRetrieveResponse"] class KeyRetrieveResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/key_update_response.py b/src/gradient/types/gpu_droplets/account/key_update_response.py index 2821e44a..54b81426 100644 --- a/src/gradient/types/gpu_droplets/account/key_update_response.py +++ b/src/gradient/types/gpu_droplets/account/key_update_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyUpdateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyUpdateResponse"] class KeyUpdateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/ssh_keys.py b/src/gradient/types/gpu_droplets/account/ssh_keys.py new file mode 100644 index 00000000..8112c18a --- /dev/null +++ b/src/gradient/types/gpu_droplets/account/ssh_keys.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["SSHKeys"] + + +class SSHKeys(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py new file mode 100644 index 00000000..29634ec1 --- /dev/null +++ b/src/gradient/types/images/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .generation_create_params import GenerationCreateParams as GenerationCreateParams +from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/images/generation_create_params.py new file mode 100644 index 00000000..ec8b672f --- /dev/null +++ b/src/gradient/types/images/generation_create_params.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["GenerationCreateParamsBase", "GenerationCreateParamsNonStreaming", "GenerationCreateParamsStreaming"] + + +class GenerationCreateParamsBase(TypedDict, total=False): + prompt: Required[str] + """A text description of the desired image(s). + + GPT-IMAGE-1 supports up to 32,000 characters and provides automatic prompt + optimization for best results. + """ + + background: Optional[str] + """The background setting for the image generation. + + GPT-IMAGE-1 supports: transparent, opaque, auto. + """ + + model: str + """The model to use for image generation. + + GPT-IMAGE-1 is the latest model offering the best quality with automatic + optimization and enhanced capabilities. + """ + + moderation: Optional[str] + """The moderation setting for the image generation. + + GPT-IMAGE-1 supports: low, auto. + """ + + n: Optional[int] + """The number of images to generate. GPT-IMAGE-1 only supports n=1.""" + + output_compression: Optional[int] + """The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.""" + + output_format: Optional[str] + """The output format for the image generation. + + GPT-IMAGE-1 supports: png, webp, jpeg. + """ + + partial_images: Optional[int] + """The number of partial image chunks to return during streaming generation. + + This parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + """ + + quality: Optional[str] + """The quality of the image that will be generated. + + GPT-IMAGE-1 supports: auto (automatically select best quality), high, medium, + low. + """ + + size: Optional[str] + """The size of the generated images. + + GPT-IMAGE-1 supports: auto (automatically select best size), 1536x1024 + (landscape), 1024x1536 (portrait). + """ + + user: Optional[str] + """ + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + """ + + +class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + """ + + +class GenerationCreateParamsStreaming(GenerationCreateParamsBase): + stream: Required[Literal[True]] + """ + If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. 
+    """
+
+
+GenerationCreateParams = Union[GenerationCreateParamsNonStreaming, GenerationCreateParamsStreaming]
diff --git a/src/gradient/types/images/generation_create_response.py b/src/gradient/types/images/generation_create_response.py
new file mode 100644
index 00000000..32757c06
--- /dev/null
+++ b/src/gradient/types/images/generation_create_response.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["GenerationCreateResponse", "Data", "Usage", "UsageInputTokensDetails"]
+
+
+class Data(BaseModel):
+    b64_json: str
+    """The base64-encoded image data for the generated image.
+
+    GPT-IMAGE-1 returns images in b64_json format only.
+    """
+
+    revised_prompt: Optional[str] = None
+    """The optimized prompt that was used to generate the image.
+
+    GPT-IMAGE-1 automatically optimizes prompts for best results.
+    """
+
+
+class UsageInputTokensDetails(BaseModel):
+    text_tokens: Optional[int] = None
+    """Number of text tokens in the input"""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """Number of tokens in the input prompt"""
+
+    total_tokens: int
+    """Total number of tokens used (input + output)"""
+
+    input_tokens_details: Optional[UsageInputTokensDetails] = None
+    """Detailed breakdown of input tokens"""
+
+    output_tokens: Optional[int] = None
+    """Number of tokens in the generated output"""
+
+
+class GenerationCreateResponse(BaseModel):
+    created: int
+    """The Unix timestamp (in seconds) of when the images were created"""
+
+    data: List[Data]
+    """The list of generated images"""
+
+    background: Optional[str] = None
+    """The background setting used for the image generation"""
+
+    output_format: Optional[str] = None
+    """The output format of the generated image"""
+
+    quality: Optional[str] = None
+    """The quality setting used for the image generation"""
+
+    size: Optional[str] = None
+    """The size of the generated image"""
+
+    usage: Optional[Usage] = None
+    """Usage statistics for the image generation request"""
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
index b23053f2..cab865fa 100644
--- a/src/gradient/types/knowledge_bases/__init__.py
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -24,6 +24,12 @@
 from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
 from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
 from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .data_source_create_presigned_urls_params import (
+    DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
+)
+from .data_source_create_presigned_urls_response import (
+    DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
+)
 from .indexing_job_retrieve_data_sources_response import (
     IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
 )
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
new file mode 100644
index 00000000..253cbce7
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import TypedDict + +__all__ = ["DataSourceCreatePresignedURLsParams", "File"] + + +class DataSourceCreatePresignedURLsParams(TypedDict, total=False): + files: Iterable[File] + """A list of files to generate presigned URLs for.""" + + +class File(TypedDict, total=False): + file_name: str + """The local filename.""" + + file_size: str + """The size of the file in bytes.""" diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py new file mode 100644 index 00000000..c3d172d7 --- /dev/null +++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["DataSourceCreatePresignedURLsResponse", "Upload"] + + +class Upload(BaseModel): + expires_at: Optional[datetime] = None + """The time the URL expires at.""" + + object_key: Optional[str] = None + """The unique object key to store the file as.""" + + original_file_name: Optional[str] = None + """The original file name.""" + + presigned_url: Optional[str] = None + """The actual presigned URL the client can use to upload the file directly.""" + + +class DataSourceCreatePresignedURLsResponse(BaseModel): + request_id: Optional[str] = None + """The ID generated for the presigned URL request.""" + + uploads: Optional[List[Upload]] = None + """A list of generated presigned URLs and object keys, one per file.""" diff --git a/src/gradient/types/shared/__init__.py b/src/gradient/types/shared/__init__.py index 6d90845f..4fb2986a 100644 --- a/src/gradient/types/shared/__init__.py +++ b/src/gradient/types/shared/__init__.py @@ -24,6 +24,9 @@ from .garbage_collection import GarbageCollection as GarbageCollection from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk +from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase +from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py index 4dd587f9..e30e0604 100644 --- a/src/gradient/types/shared/chat_completion_chunk.py +++ b/src/gradient/types/shared/chat_completion_chunk.py @@ -47,6 +47,9 @@ class ChoiceDelta(BaseModel): content: Optional[str] = None """The contents of the chunk message.""" + reasoning_content: Optional[str] = None + """The reasoning content generated by the model.""" + refusal: Optional[str] = None """The refusal message generated by the model.""" diff --git a/src/gradient/types/shared/image_gen_completed_event.py b/src/gradient/types/shared/image_gen_completed_event.py new file mode 100644 index 00000000..cbb282e5 --- /dev/null +++ b/src/gradient/types/shared/image_gen_completed_event.py @@ -0,0 +1,55 @@ +# File
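The pair of models above encodes a two-step upload handshake: first ask the service for presigned URLs, then push the file bytes directly to object storage. A sketch of that flow, assuming a configured client and that the returned URL accepts a plain HTTP PUT of the raw bytes (the exact method and required headers are dictated by the service, not by these models):

from pathlib import Path

import httpx

from gradient import Gradient

client = Gradient()  # assumes API credentials are configured in the environment

local = Path("handbook.pdf")  # hypothetical file to upload

# Step 1: request one presigned URL per file; note that file_size is a string.
resp = client.knowledge_bases.data_sources.create_presigned_urls(
    files=[{"file_name": local.name, "file_size": str(local.stat().st_size)}],
)

# Step 2: upload each file to its presigned URL before expires_at passes.
for upload in resp.uploads or []:
    if upload.presigned_url is None:
        continue
    httpx.put(upload.presigned_url, content=local.read_bytes()).raise_for_status()
    print(f"stored {upload.original_file_name} as {upload.object_key}")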
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"] + + +class UsageInputTokensDetails(BaseModel): + image_tokens: int + """The number of image tokens in the input prompt.""" + + text_tokens: int + """The number of text tokens in the input prompt.""" + + +class Usage(BaseModel): + input_tokens: int + """The number of tokens (images and text) in the input prompt.""" + + input_tokens_details: UsageInputTokensDetails + """The input tokens detailed information for the image generation.""" + + output_tokens: int + """The number of image tokens in the output image.""" + + total_tokens: int + """The total number of tokens (images and text) used for the image generation.""" + + +class ImageGenCompletedEvent(BaseModel): + b64_json: str + """Base64-encoded image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the generated image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the generated image.""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the generated image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the generated image.""" + + type: Literal["image_generation.completed"] + """The type of the event. Always `image_generation.completed`.""" + + usage: Usage + """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/src/gradient/types/shared/image_gen_partial_image_event.py b/src/gradient/types/shared/image_gen_partial_image_event.py new file mode 100644 index 00000000..4cc704b2 --- /dev/null +++ b/src/gradient/types/shared/image_gen_partial_image_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ImageGenPartialImageEvent"] + + +class ImageGenPartialImageEvent(BaseModel): + b64_json: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the requested image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the requested image.""" + + partial_image_index: int + """0-based index for the partial image (streaming).""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the requested image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the requested image.""" + + type: Literal["image_generation.partial_image"] + """The type of the event. Always `image_generation.partial_image`.""" diff --git a/src/gradient/types/shared/image_gen_stream_event.py b/src/gradient/types/shared/image_gen_stream_event.py new file mode 100644 index 00000000..30e9571e --- /dev/null +++ b/src/gradient/types/shared/image_gen_stream_event.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
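The completed and partial-image events above are the two members of the discriminated stream-event union defined just below. A sketch of consuming them, assuming a configured client; because the union is discriminated on `type`, `isinstance` checks narrow each event cleanly:

import base64

from gradient import Gradient
from gradient.types.shared import ImageGenCompletedEvent, ImageGenPartialImageEvent

client = Gradient()  # assumes API credentials are configured in the environment

stream = client.images.generations.create(
    prompt="A low-poly render of a mountain range",
    stream=True,
    partial_images=2,  # stream=True requires partial_images > 0
)

for event in stream:
    if isinstance(event, ImageGenPartialImageEvent):
        # Progressive previews arrive in order via partial_image_index.
        print(f"partial image {event.partial_image_index} ({event.size})")
    elif isinstance(event, ImageGenCompletedEvent):
        with open(f"final.{event.output_format}", "wb") as f:
            f.write(base64.b64decode(event.b64_json))
        print(f"done; {event.usage.total_tokens} tokens used")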
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .image_gen_completed_event import ImageGenCompletedEvent +from .image_gen_partial_image_event import ImageGenPartialImageEvent + +__all__ = ["ImageGenStreamEvent"] + +ImageGenStreamEvent: TypeAlias = Annotated[ + Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type") +] diff --git a/src/gradient/types/shared/size.py b/src/gradient/types/shared/size.py index 42b0b41f..73abb7dd 100644 --- a/src/gradient/types/shared/size.py +++ b/src/gradient/types/shared/size.py @@ -50,7 +50,7 @@ class Size(BaseModel): regions: List[str] """ An array containing the region slugs where this size is available for Droplet - creates. regions:read is required to view. + creates. """ slug: str diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/images/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/images/test_generations.py new file mode 100644 index 00000000..c9c67564 --- /dev/null +++ b/tests/api_resources/images/test_generations.py @@ -0,0 +1,240 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types.images import GenerationCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestGenerations: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_1(self, client: Gradient) -> None: + generation = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: + generation = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + stream=False, + user="user-1234", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_1(self, client: Gradient) -> None: + response = client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + generation = response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_1(self, client: Gradient) -> None: + with 
client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + generation = response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_2(self, client: Gradient) -> None: + generation_stream = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + generation_stream.response.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: + generation_stream = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + user="user-1234", + ) + generation_stream.response.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_2(self, client: Gradient) -> None: + response = client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_2(self, client: Gradient) -> None: + with client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + + +class TestAsyncGenerations: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: + generation = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: + generation = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + stream=False, + user="user-1234", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_1(self, 
async_client: AsyncGradient) -> None: + response = await async_client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + generation = await response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: + async with async_client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + generation = await response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: + generation_stream = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + await generation_stream.response.aclose() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: + generation_stream = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + user="user-1234", + ) + await generation_stream.response.aclose() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: + response = await async_client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = await response.parse() + await stream.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: + async with async_client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index bd7158d2..4214f880 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -13,6 +13,7 @@ DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, + DataSourceCreatePresignedURLsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -195,6 +196,47 @@ def 
test_path_params_delete(self, client: Gradient) -> None: knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_presigned_urls(self, client: Gradient) -> None: + data_source = client.knowledge_bases.data_sources.create_presigned_urls() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_presigned_urls_with_all_params(self, client: Gradient) -> None: + data_source = client.knowledge_bases.data_sources.create_presigned_urls( + files=[ + { + "file_name": "example name", + "file_size": "file_size", + } + ], + ) + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_presigned_urls(self, client: Gradient) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_presigned_urls(self, client: Gradient) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + class TestAsyncDataSources: parametrize = pytest.mark.parametrize( @@ -374,3 +416,46 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: data_source_uuid="", knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_presigned_urls(self, async_client: AsyncGradient) -> None: + data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None: + data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls( + files=[ + { + "file_name": "example name", + "file_size": "file_size", + } + ], + ) + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_presigned_urls(self, async_client: AsyncGradient) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_streaming_response_create_presigned_urls(self, async_client: AsyncGradient) -> None: + async with ( + async_client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True From 9bf1e1d93a5b0636acfc258d0350725b9fc1681e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 21:14:43 +0000 Subject: [PATCH 179/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e0dc5001..1f73031b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.1.0" + ".": "3.2.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index cae5ec3c..5f50da32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.1.0" +version = "3.2.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 69cb2fcb..6607400d 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.1.0" # x-release-please-version +__version__ = "3.2.0" # x-release-please-version From b298dab1baba03745c03609f50dbe2ffab6ceb6a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 17:11:22 +0000 Subject: [PATCH 180/200] feat(api): Images generations - openai --- .stats.yml | 2 +- api.md | 6 +- src/gradient/_client.py | 2 +- .../{images/generations.py => images.py} | 122 +++++++++--------- src/gradient/resources/images/__init__.py | 33 ----- src/gradient/resources/images/images.py | 102 --------------- src/gradient/types/__init__.py | 2 + ...ate_params.py => image_generate_params.py} | 10 +- ...response.py => image_generate_response.py} | 6 +- src/gradient/types/images/__init__.py | 6 - tests/api_resources/images/__init__.py | 1 - .../test_generations.py => test_images.py} | 102 +++++++-------- 12 files changed, 126 insertions(+), 268 deletions(-) rename src/gradient/resources/{images/generations.py => images.py} (89%) delete mode 100644 src/gradient/resources/images/__init__.py delete mode 100644 src/gradient/resources/images/images.py rename src/gradient/types/{images/generation_create_params.py => image_generate_params.py} (86%) rename src/gradient/types/{images/generation_create_response.py => image_generate_response.py} (90%) delete mode 100644 src/gradient/types/images/__init__.py delete mode 100644 tests/api_resources/images/__init__.py rename tests/api_resources/{images/test_generations.py => test_images.py} (62%) diff --git a/.stats.yml b/.stats.yml index 13507cd5..60c46674 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 175 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml openapi_spec_hash: 83a3d092965fde776b29b61f785459f9 -config_hash: dd3a0f16fb9e072bb63c570b14beccd2 +config_hash: 8497af1695ff361853c745dd869dc6b9 diff --git a/api.md b/api.md index 6dd6c18e..a2325441 100644 --- a/api.md +++ b/api.md @@ -388,17 +388,15 @@ Methods: # Images -## Generations - Types: ```python -from gradient.types.images import GenerationCreateResponse +from gradient.types import ImageGenerateResponse ``` Methods: -- client.images.generations.create(\*\*params) -> GenerationCreateResponse +- client.images.generate(\*\*params) -> ImageGenerateResponse # GPUDroplets diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 80e8ff63..2e7de55d 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -33,10 +33,10 @@ if TYPE_CHECKING: from .resources import chat, agents, images, models, regions, databases, inference, gpu_droplets, knowledge_bases + from .resources.images import ImagesResource, AsyncImagesResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.images.images import ImagesResource, AsyncImagesResource from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images.py similarity index 89% rename from src/gradient/resources/images/generations.py rename to src/gradient/resources/images.py index 592c1bde..1cf13502 100644 --- a/src/gradient/resources/images/generations.py +++ b/src/gradient/resources/images.py @@ -7,47 +7,47 @@ import httpx -from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import image_generate_params +from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from .._utils import required_args, maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._streaming import Stream, AsyncStream -from ..._base_client import make_request_options -from ...types.images import generation_create_params -from ...types.shared.image_gen_stream_event import ImageGenStreamEvent -from ...types.images.generation_create_response import GenerationCreateResponse +from .._streaming import Stream, AsyncStream +from .._base_client import make_request_options +from ..types.image_generate_response import ImageGenerateResponse +from ..types.shared.image_gen_stream_event import ImageGenStreamEvent -__all__ = ["GenerationsResource", "AsyncGenerationsResource"] +__all__ = ["ImagesResource", "AsyncImagesResource"] -class GenerationsResource(SyncAPIResource): +class ImagesResource(SyncAPIResource): @cached_property - def with_raw_response(self) -> GenerationsResourceWithRawResponse: + 
def with_raw_response(self) -> ImagesResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ - return GenerationsResourceWithRawResponse(self) + return ImagesResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse: + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ - return GenerationsResourceWithStreamingResponse(self) + return ImagesResourceWithStreamingResponse(self) @overload - def create( + def generate( self, *, prompt: str, @@ -68,7 +68,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse: + ) -> ImageGenerateResponse: """ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest image generation model with automatic prompt optimization and enhanced visual @@ -126,7 +126,7 @@ def create( ... @overload - def create( + def generate( self, *, prompt: str, @@ -205,7 +205,7 @@ def create( ... @overload - def create( + def generate( self, *, prompt: str, @@ -226,7 +226,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]: """ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest image generation model with automatic prompt optimization and enhanced visual @@ -284,7 +284,7 @@ def create( ... @required_args(["prompt"], ["prompt", "stream"]) - def create( + def generate( self, *, prompt: str, @@ -305,7 +305,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]: return self._post( "/images/generations" if self._client._base_url_overridden @@ -325,41 +325,41 @@ def create( "stream": stream, "user": user, }, - generation_create_params.GenerationCreateParamsStreaming + image_generate_params.ImageGenerateParamsStreaming if stream - else generation_create_params.GenerationCreateParamsNonStreaming, + else image_generate_params.ImageGenerateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=GenerationCreateResponse, + cast_to=ImageGenerateResponse, stream=stream or False, stream_cls=Stream[ImageGenStreamEvent], ) -class AsyncGenerationsResource(AsyncAPIResource): +class AsyncImagesResource(AsyncAPIResource): @cached_property - def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse: + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers """ - return AsyncGenerationsResourceWithRawResponse(self) + return AsyncImagesResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response """ - return AsyncGenerationsResourceWithStreamingResponse(self) + return AsyncImagesResourceWithStreamingResponse(self) @overload - async def create( + async def generate( self, *, prompt: str, @@ -380,7 +380,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse: + ) -> ImageGenerateResponse: """ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest image generation model with automatic prompt optimization and enhanced visual @@ -438,7 +438,7 @@ async def create( ... @overload - async def create( + async def generate( self, *, prompt: str, @@ -517,7 +517,7 @@ async def create( ... @overload - async def create( + async def generate( self, *, prompt: str, @@ -538,7 +538,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]: """ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest image generation model with automatic prompt optimization and enhanced visual @@ -596,7 +596,7 @@ async def create( ... 
@required_args(["prompt"], ["prompt", "stream"]) - async def create( + async def generate( self, *, prompt: str, @@ -617,7 +617,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, - ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]: return await self._post( "/images/generations" if self._client._base_url_overridden @@ -637,50 +637,50 @@ async def create( "stream": stream, "user": user, }, - generation_create_params.GenerationCreateParamsStreaming + image_generate_params.ImageGenerateParamsStreaming if stream - else generation_create_params.GenerationCreateParamsNonStreaming, + else image_generate_params.ImageGenerateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=GenerationCreateResponse, + cast_to=ImageGenerateResponse, stream=stream or False, stream_cls=AsyncStream[ImageGenStreamEvent], ) -class GenerationsResourceWithRawResponse: - def __init__(self, generations: GenerationsResource) -> None: - self._generations = generations +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images - self.create = to_raw_response_wrapper( - generations.create, + self.generate = to_raw_response_wrapper( + images.generate, ) -class AsyncGenerationsResourceWithRawResponse: - def __init__(self, generations: AsyncGenerationsResource) -> None: - self._generations = generations +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images - self.create = async_to_raw_response_wrapper( - generations.create, + self.generate = async_to_raw_response_wrapper( + images.generate, ) -class GenerationsResourceWithStreamingResponse: - def __init__(self, generations: GenerationsResource) -> None: - self._generations = generations +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images - self.create = to_streamed_response_wrapper( - generations.create, + self.generate = to_streamed_response_wrapper( + images.generate, ) -class AsyncGenerationsResourceWithStreamingResponse: - def __init__(self, generations: AsyncGenerationsResource) -> None: - self._generations = generations +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images - self.create = async_to_streamed_response_wrapper( - generations.create, + self.generate = async_to_streamed_response_wrapper( + images.generate, ) diff --git a/src/gradient/resources/images/__init__.py b/src/gradient/resources/images/__init__.py deleted file mode 100644 index cf187f1d..00000000 --- a/src/gradient/resources/images/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .images import ( - ImagesResource, - AsyncImagesResource, - ImagesResourceWithRawResponse, - AsyncImagesResourceWithRawResponse, - ImagesResourceWithStreamingResponse, - AsyncImagesResourceWithStreamingResponse, -) -from .generations import ( - GenerationsResource, - AsyncGenerationsResource, - GenerationsResourceWithRawResponse, - AsyncGenerationsResourceWithRawResponse, - GenerationsResourceWithStreamingResponse, - AsyncGenerationsResourceWithStreamingResponse, -) - -__all__ = [ - "GenerationsResource", - "AsyncGenerationsResource", - "GenerationsResourceWithRawResponse", - "AsyncGenerationsResourceWithRawResponse", - "GenerationsResourceWithStreamingResponse", - "AsyncGenerationsResourceWithStreamingResponse", - "ImagesResource", - "AsyncImagesResource", - "ImagesResourceWithRawResponse", - "AsyncImagesResourceWithRawResponse", - "ImagesResourceWithStreamingResponse", - "AsyncImagesResourceWithStreamingResponse", -] diff --git a/src/gradient/resources/images/images.py b/src/gradient/resources/images/images.py deleted file mode 100644 index 37e7290f..00000000 --- a/src/gradient/resources/images/images.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .generations import ( - GenerationsResource, - AsyncGenerationsResource, - GenerationsResourceWithRawResponse, - AsyncGenerationsResourceWithRawResponse, - GenerationsResourceWithStreamingResponse, - AsyncGenerationsResourceWithStreamingResponse, -) - -__all__ = ["ImagesResource", "AsyncImagesResource"] - - -class ImagesResource(SyncAPIResource): - @cached_property - def generations(self) -> GenerationsResource: - return GenerationsResource(self._client) - - @cached_property - def with_raw_response(self) -> ImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers - """ - return ImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response - """ - return ImagesResourceWithStreamingResponse(self) - - -class AsyncImagesResource(AsyncAPIResource): - @cached_property - def generations(self) -> AsyncGenerationsResource: - return AsyncGenerationsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers - """ - return AsyncImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response - """ - return AsyncImagesResourceWithStreamingResponse(self) - - -class ImagesResourceWithRawResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - @cached_property - def generations(self) -> GenerationsResourceWithRawResponse: - return GenerationsResourceWithRawResponse(self._images.generations) - - -class AsyncImagesResourceWithRawResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - @cached_property - def generations(self) -> AsyncGenerationsResourceWithRawResponse: - return AsyncGenerationsResourceWithRawResponse(self._images.generations) - - -class ImagesResourceWithStreamingResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - @cached_property - def generations(self) -> GenerationsResourceWithStreamingResponse: - return GenerationsResourceWithStreamingResponse(self._images.generations) - - -class AsyncImagesResourceWithStreamingResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - @cached_property - def generations(self) -> AsyncGenerationsResourceWithStreamingResponse: - return AsyncGenerationsResourceWithStreamingResponse(self._images.generations) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index 939e26f7..a7edfacb 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -66,10 +66,12 @@ from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy +from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams +from .image_generate_response import ImageGenerateResponse as ImageGenerateResponse from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/image_generate_params.py similarity index 86% rename from src/gradient/types/images/generation_create_params.py rename to src/gradient/types/image_generate_params.py index ec8b672f..42e6144a 100644 --- a/src/gradient/types/images/generation_create_params.py +++ b/src/gradient/types/image_generate_params.py @@ -5,10 +5,10 @@ from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["GenerationCreateParamsBase", "GenerationCreateParamsNonStreaming", "GenerationCreateParamsStreaming"] +__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"] -class GenerationCreateParamsBase(TypedDict, total=False): +class ImageGenerateParamsBase(TypedDict, total=False): prompt: Required[str] """A text description of the desired image(s). 
@@ -77,7 +77,7 @@ class GenerationCreateParamsBase(TypedDict, total=False): """ -class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False): +class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False): stream: Optional[Literal[False]] """ If set to true, partial image data will be streamed as the image is being @@ -87,7 +87,7 @@ class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False """ -class GenerationCreateParamsStreaming(GenerationCreateParamsBase): +class ImageGenerateParamsStreaming(ImageGenerateParamsBase): stream: Required[Literal[True]] """ If set to true, partial image data will be streamed as the image is being @@ -97,4 +97,4 @@ class GenerationCreateParamsStreaming(GenerationCreateParamsBase): """ -GenerationCreateParams = Union[GenerationCreateParamsNonStreaming, GenerationCreateParamsStreaming] +ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming] diff --git a/src/gradient/types/images/generation_create_response.py b/src/gradient/types/image_generate_response.py similarity index 90% rename from src/gradient/types/images/generation_create_response.py rename to src/gradient/types/image_generate_response.py index 32757c06..5f97697c 100644 --- a/src/gradient/types/images/generation_create_response.py +++ b/src/gradient/types/image_generate_response.py @@ -2,9 +2,9 @@ from typing import List, Optional -from ..._models import BaseModel +from .._models import BaseModel -__all__ = ["GenerationCreateResponse", "Data", "Usage", "UsageInputTokensDetails"] +__all__ = ["ImageGenerateResponse", "Data", "Usage", "UsageInputTokensDetails"] class Data(BaseModel): @@ -40,7 +40,7 @@ class Usage(BaseModel): """Number of tokens in the generated output""" -class GenerationCreateResponse(BaseModel): +class ImageGenerateResponse(BaseModel): created: int """The Unix timestamp (in seconds) of when the images were created""" diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py deleted file mode 100644 index 29634ec1..00000000 --- a/src/gradient/types/images/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .generation_create_params import GenerationCreateParams as GenerationCreateParams -from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/images/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
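With the params and response types renamed, the overload pair still keys the method's return type off `stream`: omitting it (or passing `stream=False`) yields a parsed `ImageGenerateResponse`, while `stream=True` yields an iterable of `ImageGenStreamEvent` values. A sketch under the renamed API, assuming a configured client:

from gradient import Gradient
from gradient.types import ImageGenerateResponse

client = Gradient()  # assumes API credentials are configured in the environment

# Non-streaming overload: the SDK parses the full response body.
image: ImageGenerateResponse = client.images.generate(prompt="a red bicycle")
print(len(image.data))

# Streaming overload: the same method returns a Stream[ImageGenStreamEvent].
for event in client.images.generate(prompt="a red bicycle", stream=True, partial_images=1):
    print(event.type)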
diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/test_images.py similarity index 62% rename from tests/api_resources/images/test_generations.py rename to tests/api_resources/test_images.py index c9c67564..47428d02 100644 --- a/tests/api_resources/images/test_generations.py +++ b/tests/api_resources/test_images.py @@ -9,26 +9,26 @@ from gradient import Gradient, AsyncGradient from tests.utils import assert_matches_type -from gradient.types.images import GenerationCreateResponse +from gradient.types import ImageGenerateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestGenerations: +class TestImages: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_method_create_overload_1(self, client: Gradient) -> None: - generation = client.images.generations.create( + def test_method_generate_overload_1(self, client: Gradient) -> None: + image = client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None: - generation = client.images.generations.create( + def test_method_generate_with_all_params_overload_1(self, client: Gradient) -> None: + image = client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", background="auto", model="openai-gpt-image-1", @@ -42,47 +42,47 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non stream=False, user="user-1234", ) - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_raw_response_create_overload_1(self, client: Gradient) -> None: - response = client.images.generations.with_raw_response.create( + def test_raw_response_generate_overload_1(self, client: Gradient) -> None: + response = client.images.with_raw_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - generation = response.parse() - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + image = response.parse() + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_streaming_response_create_overload_1(self, client: Gradient) -> None: - with client.images.generations.with_streaming_response.create( + def test_streaming_response_generate_overload_1(self, client: Gradient) -> None: + with client.images.with_streaming_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - generation = response.parse() - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + image = response.parse() + assert_matches_type(ImageGenerateResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True 
@pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_method_create_overload_2(self, client: Gradient) -> None: - generation_stream = client.images.generations.create( + def test_method_generate_overload_2(self, client: Gradient) -> None: + image_stream = client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) - generation_stream.response.close() + image_stream.response.close() @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: - generation_stream = client.images.generations.create( + def test_method_generate_with_all_params_overload_2(self, client: Gradient) -> None: + image_stream = client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, background="auto", @@ -96,12 +96,12 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non size="auto", user="user-1234", ) - generation_stream.response.close() + image_stream.response.close() @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_raw_response_create_overload_2(self, client: Gradient) -> None: - response = client.images.generations.with_raw_response.create( + def test_raw_response_generate_overload_2(self, client: Gradient) -> None: + response = client.images.with_raw_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) @@ -112,8 +112,8 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None: @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - def test_streaming_response_create_overload_2(self, client: Gradient) -> None: - with client.images.generations.with_streaming_response.create( + def test_streaming_response_generate_overload_2(self, client: Gradient) -> None: + with client.images.with_streaming_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) as response: @@ -126,23 +126,23 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None: assert cast(Any, response.is_closed) is True -class TestAsyncGenerations: +class TestAsyncImages: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: - generation = await async_client.images.generations.create( + async def test_method_generate_overload_1(self, async_client: AsyncGradient) -> None: + image = await async_client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: - generation = await async_client.images.generations.create( + async def test_method_generate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: + image = await async_client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", background="auto", model="openai-gpt-image-1", @@ -156,47 +156,47 @@ async def 
test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, user="user-1234", ) - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None: - response = await async_client.images.generations.with_raw_response.create( + async def test_raw_response_generate_overload_1(self, async_client: AsyncGradient) -> None: + response = await async_client.images.with_raw_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - generation = await response.parse() - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + image = await response.parse() + assert_matches_type(ImageGenerateResponse, image, path=["response"]) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: - async with async_client.images.generations.with_streaming_response.create( + async def test_streaming_response_generate_overload_1(self, async_client: AsyncGradient) -> None: + async with async_client.images.with_streaming_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - generation = await response.parse() - assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + image = await response.parse() + assert_matches_type(ImageGenerateResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: - generation_stream = await async_client.images.generations.create( + async def test_method_generate_overload_2(self, async_client: AsyncGradient) -> None: + image_stream = await async_client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) - await generation_stream.response.aclose() + await image_stream.response.aclose() @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: - generation_stream = await async_client.images.generations.create( + async def test_method_generate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: + image_stream = await async_client.images.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, background="auto", @@ -210,12 +210,12 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn size="auto", user="user-1234", ) - await generation_stream.response.aclose() + await image_stream.response.aclose() @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: - response = await async_client.images.generations.with_raw_response.create( + async def test_raw_response_generate_overload_2(self, async_client: AsyncGradient) -> None: + response = await 
async_client.images.with_raw_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) @@ -226,8 +226,8 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) @pytest.mark.skip(reason="Prism tests are disabled") @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: - async with async_client.images.generations.with_streaming_response.create( + async def test_streaming_response_generate_overload_2(self, async_client: AsyncGradient) -> None: + async with async_client.images.with_streaming_response.generate( prompt="A cute baby sea otter floating on its back in calm blue water", stream=True, ) as response: From 6f3521b8f8bb9dd0889a4fc43a94a59666b6df09 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 19:28:00 +0000 Subject: [PATCH 181/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1f73031b..ff1c7af5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.2.0" + ".": "3.3.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5f50da32..2f000bf1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.2.0" +version = "3.3.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 6607400d..f90aaf96 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.2.0" # x-release-please-version +__version__ = "3.3.0" # x-release-please-version From d235b700563d7b9de7449278bc6c32fb0f5ad5c8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Oct 2025 19:10:28 +0000 Subject: [PATCH 182/200] feat(api): manual updates --- .stats.yml | 4 ++-- .../gpu_droplets/floating_ips/floating_ips.py | 12 ------------ .../types/agents/chat/completion_create_response.py | 2 +- .../types/chat/completion_create_response.py | 2 +- src/gradient/types/shared/chat_completion_chunk.py | 2 +- 5 files changed, 5 insertions(+), 17 deletions(-) diff --git a/.stats.yml b/.stats.yml index 60c46674..3f41901a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 175 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml -openapi_spec_hash: 83a3d092965fde776b29b61f785459f9 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-d15c113822740c8a5cd0d054d186c524ea6e15a9e64e8d0662b5a5a667745aaa.yml +openapi_spec_hash: b56f95892c05800b022f39d77087037b config_hash: 8497af1695ff361853c745dd869dc6b9 diff --git a/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py index 58b4bdb0..f55bfd41 100644 --- a/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py +++ b/src/gradient/resources/gpu_droplets/floating_ips/floating_ips.py @@ -79,9 +79,6 @@ def create( - To create a new floating IP reserved to a region, send a POST request to `/v2/floating_ips` with the `region` attribute. - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - Args: droplet_id: The ID of the Droplet that the floating IP will be assigned to. @@ -118,9 +115,6 @@ def create( - To create a new floating IP reserved to a region, send a POST request to `/v2/floating_ips` with the `region` attribute. - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - Args: region: The slug identifier for the region the floating IP will be reserved to. @@ -336,9 +330,6 @@ async def create( - To create a new floating IP reserved to a region, send a POST request to `/v2/floating_ips` with the `region` attribute. - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - Args: droplet_id: The ID of the Droplet that the floating IP will be assigned to. @@ -375,9 +366,6 @@ async def create( - To create a new floating IP reserved to a region, send a POST request to `/v2/floating_ips` with the `region` attribute. - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - Args: region: The slug identifier for the region the floating IP will be reserved to. 
diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py index f3dedb4e..69b3d203 100644 --- a/src/gradient/types/agents/chat/completion_create_response.py +++ b/src/gradient/types/agents/chat/completion_create_response.py @@ -67,7 +67,7 @@ class ChoiceMessage(BaseModel): class Choice(BaseModel): - finish_reason: Literal["stop", "length", "tool_calls"] + finish_reason: Literal["stop", "length", "tool_calls", "content_filter"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py index 9e157aee..151f6556 100644 --- a/src/gradient/types/chat/completion_create_response.py +++ b/src/gradient/types/chat/completion_create_response.py @@ -67,7 +67,7 @@ class ChoiceMessage(BaseModel): class Choice(BaseModel): - finish_reason: Literal["stop", "length", "tool_calls"] + finish_reason: Literal["stop", "length", "tool_calls", "content_filter"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py index e30e0604..ff705bf4 100644 --- a/src/gradient/types/shared/chat_completion_chunk.py +++ b/src/gradient/types/shared/chat_completion_chunk.py @@ -71,7 +71,7 @@ class Choice(BaseModel): delta: ChoiceDelta """A chat completion delta generated by streamed model responses.""" - finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None + finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter"]] = None """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop From 292a434828ac9462258055ac901bd1ebcd74a192 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Oct 2025 20:33:20 +0000 Subject: [PATCH 183/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ff1c7af5..2437b419 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.3.0" + ".": "3.4.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 2f000bf1..de684763 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.3.0" +version = "3.4.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index f90aaf96..440935e9 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
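The hunks above widen the `finish_reason` literal with a `content_filter` member across the agent chat response, the plain chat response, and the shared streaming chunk. Downstream code that branches on the field needs one more arm; a small sketch (the helper name is ours, not from the SDK):

```python
from __future__ import annotations


def describe_finish(finish_reason: str | None) -> str:
    # Branches mirror the widened Literal shown in the diffs above.
    if finish_reason == "stop":
        return "natural stop point or a provided stop sequence"
    if finish_reason == "length":
        return "maximum number of tokens was reached"
    if finish_reason == "tool_calls":
        return "the model requested tool calls"
    if finish_reason == "content_filter":  # newly added variant
        return "generation was cut off by a content filter"
    return "no terminal finish_reason yet (mid-stream chunk)"
```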
__title__ = "gradient" -__version__ = "3.3.0" # x-release-please-version +__version__ = "3.4.0" # x-release-please-version From 2f9074e8ff2d696dcd85d10fad626f0dfc175c05 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 11 Oct 2025 02:37:31 +0000 Subject: [PATCH 184/200] chore(internal): detect missing future annotations with ruff --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index de684763..64829041 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -224,6 +224,8 @@ select = [ "B", # remove unused imports "F401", + # check for missing future annotations + "FA102", # bare except statements "E722", # unused arguments @@ -246,6 +248,8 @@ unfixable = [ "T203", ] +extend-safe-fixes = ["FA102"] + [tool.ruff.lint.flake8-tidy-imports.banned-api] "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead" From 5ef309f08125570580a82b48522b5442b67414a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Oct 2025 18:48:21 +0000 Subject: [PATCH 185/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2437b419..bf0d0361 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.4.0" + ".": "3.5.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 64829041..54e233e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.4.0" +version = "3.5.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 440935e9..68c981bc 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.4.0" # x-release-please-version +__version__ = "3.5.0" # x-release-please-version From f62acff8c59168ec2fc38fa992f185acca359fc8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 16 Oct 2025 14:44:31 +0000 Subject: [PATCH 186/200] feat(api): manual updates --- .stats.yml | 6 +- README.md | 10 +- src/gradient/_client.py | 60 +++++++- tests/conftest.py | 17 +-- tests/test_client.py | 294 +++++----------------------------------- 5 files changed, 100 insertions(+), 287 deletions(-) diff --git a/.stats.yml b/.stats.yml index 3f41901a..af483f0a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 175 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-d15c113822740c8a5cd0d054d186c524ea6e15a9e64e8d0662b5a5a667745aaa.yml -openapi_spec_hash: b56f95892c05800b022f39d77087037b -config_hash: 8497af1695ff361853c745dd869dc6b9 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-cb3bf9b21459cad24410206c27a32fd31ef6cf86711700597549dbbd0d634002.yml +openapi_spec_hash: 6a9149a81ba15e7c5c5c1f4d77daad92 +config_hash: bad49c3bf949d5168ec3896bedff253a diff --git a/README.md b/README.md index 2feb1f88..56880ead 100644 --- a/README.md +++ b/README.md @@ -29,8 +29,8 @@ import os from gradient import Gradient client = Gradient( - access_token=os.environ.get( - "DIGITALOCEAN_ACCESS_TOKEN" + model_access_key=os.environ.get( + "GRADIENT_MODEL_ACCESS_KEY" ), # This is the default and can be omitted ) @@ -61,8 +61,8 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - access_token=os.environ.get( - "DIGITALOCEAN_ACCESS_TOKEN" + model_access_key=os.environ.get( + "GRADIENT_MODEL_ACCESS_KEY" ), # This is the default and can be omitted ) @@ -106,7 +106,7 @@ from gradient import AsyncGradient async def main() -> None: async with AsyncGradient( - access_token="My Access Token", + model_access_key="My Model Access Key", http_client=DefaultAioHttpClient(), ) as client: completion = await client.chat.completions.create( diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 2e7de55d..22f4d2e6 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -208,11 +208,29 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: + return {**self._bearer_auth, **self._model_access_key, **self._agent_access_key} + + @property + def _bearer_auth(self) -> dict[str, str]: access_token = self.access_token if access_token is None: return {} return {"Authorization": f"Bearer {access_token}"} + @property + def _model_access_key(self) -> dict[str, str]: + model_access_key = self.model_access_key + if model_access_key is None: + return {} + return {"Authorization": f"Bearer {model_access_key}"} + + @property + def _agent_access_key(self) -> dict[str, str]: + agent_access_key = self.agent_access_key + if agent_access_key is None: + return {} + return {"Authorization": f"Bearer {agent_access_key}"} + @property @override def default_headers(self) -> dict[str, str | Omit]: @@ -229,8 +247,18 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: if isinstance(custom_headers.get("Authorization"), Omit): return + if self.model_access_key and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + if self.agent_access_key and headers.get("Authorization"): 
+ return + if isinstance(custom_headers.get("Authorization"), Omit): + return + raise TypeError( - '"Could not resolve authentication method. Expected the access_token to be set. Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected one of access_token, model_access_key or agent_access_key to be set. Or for one of the `Authorization`, `Authorization` or `Authorization` headers to be explicitly omitted"' ) def copy( @@ -481,11 +509,29 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: + return {**self._bearer_auth, **self._model_access_key, **self._agent_access_key} + + @property + def _bearer_auth(self) -> dict[str, str]: access_token = self.access_token if access_token is None: return {} return {"Authorization": f"Bearer {access_token}"} + @property + def _model_access_key(self) -> dict[str, str]: + model_access_key = self.model_access_key + if model_access_key is None: + return {} + return {"Authorization": f"Bearer {model_access_key}"} + + @property + def _agent_access_key(self) -> dict[str, str]: + agent_access_key = self.agent_access_key + if agent_access_key is None: + return {} + return {"Authorization": f"Bearer {agent_access_key}"} + @property @override def default_headers(self) -> dict[str, str | Omit]: @@ -502,8 +548,18 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: if isinstance(custom_headers.get("Authorization"), Omit): return + if self.model_access_key and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + + if self.agent_access_key and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + raise TypeError( - '"Could not resolve authentication method. Expected the access_token to be set. Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected one of access_token, model_access_key or agent_access_key to be set. 
Or for one of the `Authorization`, `Authorization` or `Authorization` headers to be explicitly omitted"' ) def copy( diff --git a/tests/conftest.py b/tests/conftest.py index bc2aa92e..0a1890ae 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -46,8 +46,6 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") access_token = "My Access Token" -model_access_key = "My Model Access Key" -agent_access_key = "My Agent Access Key" @pytest.fixture(scope="session") @@ -56,13 +54,7 @@ def client(request: FixtureRequest) -> Iterator[Gradient]: if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - with Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=strict, - ) as client: + with Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=strict) as client: yield client @@ -87,11 +79,6 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradient]: raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") async with AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=strict, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=strict, http_client=http_client ) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index ed0d0180..681d87a1 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -39,8 +39,6 @@ base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") access_token = "My Access Token" -model_access_key = "My Model Access Key" -agent_access_key = "My Agent Access Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: @@ -62,13 +60,7 @@ def _get_open_connections(client: Gradient | AsyncGradient) -> int: class TestGradient: - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: @@ -98,14 +90,6 @@ def test_copy(self) -> None: assert copied.access_token == "another My Access Token" assert self.client.access_token == "My Access Token" - copied = self.client.copy(model_access_key="another My Model Access Key") - assert copied.model_access_key == "another My Model Access Key" - assert self.client.model_access_key == "My Model Access Key" - - copied = self.client.copy(agent_access_key="another My Agent Access Key") - assert copied.agent_access_key == "another My Agent Access Key" - assert self.client.agent_access_key == "My Agent Access Key" - def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -126,8 +110,6 @@ def test_copy_default_headers(self) -> None: client = Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -163,12 +145,7 @@ def 
test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - default_query={"foo": "bar"}, + base_url=base_url, access_token=access_token, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -294,12 +271,7 @@ def test_request_timeout(self) -> None: def test_client_timeout_option(self) -> None: client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - timeout=httpx.Timeout(0), + base_url=base_url, access_token=access_token, _strict_response_validation=True, timeout=httpx.Timeout(0) ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -310,12 +282,7 @@ def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -325,12 +292,7 @@ def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -340,12 +302,7 @@ def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -358,8 +315,6 @@ async def test_invalid_http_client(self) -> None: Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -368,8 +323,6 @@ def test_default_headers_option(self) -> None: client = Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -380,8 +333,6 @@ def test_default_headers_option(self) -> None: client2 = Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -393,28 +344,16 
@@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): - client2 = Gradient( - base_url=base_url, - access_token=None, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client2 = Gradient(base_url=base_url, access_token=None, _strict_response_validation=True) with pytest.raises( TypeError, - match="Could not resolve authentication method. Expected the access_token to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected one of access_token, model_access_key or agent_access_key to be set. Or for one of the `Authorization`, `Authorization` or `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -427,8 +366,6 @@ def test_default_query_option(self) -> None: client = Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -631,11 +568,7 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = Gradient( - base_url="https://example.com/from_init", - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, + base_url="https://example.com/from_init", access_token=access_token, _strict_response_validation=True ) assert client.base_url == "https://example.com/from_init/" @@ -645,12 +578,7 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): - client = Gradient( - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(access_token=access_token, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( @@ -659,15 +587,11 @@ def test_base_url_env(self) -> None: Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -690,15 +614,11 @@ def test_base_url_trailing_slash(self, client: Gradient) -> None: Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - 
agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -721,15 +641,11 @@ def test_base_url_no_trailing_slash(self, client: Gradient) -> None: Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -747,13 +663,7 @@ def test_absolute_request_url(self, client: Gradient) -> None: assert request.url == "https://myapi.com/foo" def test_copied_client_does_not_close_http(self) -> None: - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -764,13 +674,7 @@ def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() def test_client_context_manager(self) -> None: - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) with client as c2: assert c2 is client assert not c2.is_closed() @@ -794,8 +698,6 @@ def test_client_max_retries_validation(self) -> None: Gradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -818,24 +720,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + strict_client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=False, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=False) response = client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -863,13 +753,7 @@ class Model(BaseModel): ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = Gradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) @@ -1065,13 +949,7 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: 
class TestAsyncGradient: - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @@ -1103,14 +981,6 @@ def test_copy(self) -> None: assert copied.access_token == "another My Access Token" assert self.client.access_token == "My Access Token" - copied = self.client.copy(model_access_key="another My Model Access Key") - assert copied.model_access_key == "another My Model Access Key" - assert self.client.model_access_key == "My Model Access Key" - - copied = self.client.copy(agent_access_key="another My Agent Access Key") - assert copied.agent_access_key == "another My Agent Access Key" - assert self.client.agent_access_key == "My Agent Access Key" - def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) @@ -1131,8 +1001,6 @@ def test_copy_default_headers(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1168,12 +1036,7 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - default_query={"foo": "bar"}, + base_url=base_url, access_token=access_token, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -1299,12 +1162,7 @@ async def test_request_timeout(self) -> None: async def test_client_timeout_option(self) -> None: client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - timeout=httpx.Timeout(0), + base_url=base_url, access_token=access_token, _strict_response_validation=True, timeout=httpx.Timeout(0) ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1315,12 +1173,7 @@ async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1330,12 +1183,7 @@ async def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = 
client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1345,12 +1193,7 @@ async def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - http_client=http_client, + base_url=base_url, access_token=access_token, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1363,8 +1206,6 @@ def test_invalid_http_client(self) -> None: AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -1373,8 +1214,6 @@ def test_default_headers_option(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1385,8 +1224,6 @@ def test_default_headers_option(self) -> None: client2 = AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -1398,28 +1235,16 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): - client2 = AsyncGradient( - base_url=base_url, - access_token=None, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client2 = AsyncGradient(base_url=base_url, access_token=None, _strict_response_validation=True) with pytest.raises( TypeError, - match="Could not resolve authentication method. Expected the access_token to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected one of access_token, model_access_key or agent_access_key to be set. 
Or for one of the `Authorization`, `Authorization` or `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1432,8 +1257,6 @@ def test_default_query_option(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -1636,11 +1459,7 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = AsyncGradient( - base_url="https://example.com/from_init", - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, + base_url="https://example.com/from_init", access_token=access_token, _strict_response_validation=True ) assert client.base_url == "https://example.com/from_init/" @@ -1650,12 +1469,7 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): - client = AsyncGradient( - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(access_token=access_token, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( @@ -1664,15 +1478,11 @@ def test_base_url_env(self) -> None: AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1695,15 +1505,11 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1726,15 +1532,11 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1752,13 +1554,7 @@ def test_absolute_request_url(self, client: AsyncGradient) -> None: assert request.url == "https://myapi.com/foo" async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -1770,13 +1566,7 
@@ async def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) async with client as c2: assert c2 is client assert not c2.is_closed() @@ -1801,8 +1591,6 @@ async def test_client_max_retries_validation(self) -> None: AsyncGradient( base_url=base_url, access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -1827,24 +1615,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + strict_client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=False, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=False) response = await client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -1873,13 +1649,7 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncGradient( - base_url=base_url, - access_token=access_token, - model_access_key=model_access_key, - agent_access_key=agent_access_key, - _strict_response_validation=True, - ) + client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) From 5674dbcb4bae48e46b462a0bc672d341c881f105 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 16 Oct 2025 17:43:25 +0000 Subject: [PATCH 187/200] chore(internal): version bump --- .release-please-manifest.json | 2 +- pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bf0d0361..f391d416 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.5.0" + ".": "3.6.0" } \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 54e233e6..c5d78df0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.5.0" +version = "3.6.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 68c981bc..0190d688 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File 
generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.5.0" # x-release-please-version +__version__ = "3.6.0" # x-release-please-version From e5d4eeac735edf694e517de34c247bce6ec1a9f9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 18 Oct 2025 02:23:27 +0000 Subject: [PATCH 188/200] chore: bump `httpx-aiohttp` version to 0.1.9 --- pyproject.toml | 2 +- requirements-dev.lock | 2 +- requirements.lock | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c5d78df0..07e0ccc0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ Homepage = "https://github.com/digitalocean/gradient-python" Repository = "https://github.com/digitalocean/gradient-python" [project.optional-dependencies] -aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"] [tool.rye] managed = true diff --git a/requirements-dev.lock b/requirements-dev.lock index 896c8c3a..e5307af8 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -56,7 +56,7 @@ httpx==0.28.1 # via gradient # via httpx-aiohttp # via respx -httpx-aiohttp==0.1.8 +httpx-aiohttp==0.1.9 # via gradient idna==3.4 # via anyio diff --git a/requirements.lock b/requirements.lock index 1fce47a6..8c60e6c5 100644 --- a/requirements.lock +++ b/requirements.lock @@ -43,7 +43,7 @@ httpcore==1.0.9 httpx==0.28.1 # via gradient # via httpx-aiohttp -httpx-aiohttp==0.1.8 +httpx-aiohttp==0.1.9 # via gradient idna==3.4 # via anyio From 7a9be259025f878a8cf1009749b9a0ce5f55d2a3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 02:53:04 +0000 Subject: [PATCH 189/200] fix(client): close streams without requiring full consumption --- src/gradient/_streaming.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/gradient/_streaming.py b/src/gradient/_streaming.py index eb9be89d..df2a5870 100644 --- a/src/gradient/_streaming.py +++ b/src/gradient/_streaming.py @@ -76,9 +76,8 @@ def __stream__(self) -> Iterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed - for _sse in iterator: - ... + # As we might not fully consume the response stream, we need to close it explicitly + response.close() def __enter__(self) -> Self: return self @@ -158,9 +157,8 @@ async def __stream__(self) -> AsyncIterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed - async for _sse in iterator: - ... 
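This change (shown above for the sync iterator, completed for the async iterator just below) stops draining the remaining SSE events when a stream is abandoned and instead closes the response outright. A hedged sketch of the consumer-side behavior this enables; the model slug and message are illustrative:

```python
# Hedged sketch: exiting the `with` block early now closes the underlying
# HTTP response rather than silently consuming the rest of the stream.
from gradient import Gradient

client = Gradient()  # credentials assumed from the environment

with client.chat.completions.create(
    model="llama3.3-70b-instruct",  # illustrative model slug
    messages=[{"role": "user", "content": "Stream a long answer."}],
    stream=True,
) as stream:
    for chunk in stream:
        print(chunk)
        break  # early exit no longer leaks the connection
```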
+ # As we might not fully consume the response stream, we need to close it explicitly + await response.aclose() async def __aenter__(self) -> Self: return self From 7dc80e1773bbec0796ca920b1c8462d5549c3f59 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:06:12 +0000 Subject: [PATCH 190/200] feat(api): add inference errors --- .stats.yml | 4 +- src/gradient/resources/agents/agents.py | 30 ++++++++++++++ src/gradient/types/agent_create_params.py | 5 +++ src/gradient/types/agent_list_response.py | 3 ++ src/gradient/types/agent_update_params.py | 9 +++++ src/gradient/types/api_agent.py | 40 +++++++++++++++++++ .../types/knowledge_base_create_params.py | 16 +++++++- .../api_indexed_data_source.py | 1 + .../types/knowledge_bases/api_indexing_job.py | 15 ++++++- .../api_knowledge_base_data_source.py | 12 +++++- .../api_web_crawler_data_source.py | 5 ++- .../api_web_crawler_data_source_param.py | 5 +++ .../knowledge_bases/test_data_sources.py | 2 + tests/api_resources/test_agents.py | 8 ++++ tests/api_resources/test_knowledge_bases.py | 10 +++++ 15 files changed, 159 insertions(+), 6 deletions(-) diff --git a/.stats.yml b/.stats.yml index af483f0a..f8627752 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 175 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-cb3bf9b21459cad24410206c27a32fd31ef6cf86711700597549dbbd0d634002.yml -openapi_spec_hash: 6a9149a81ba15e7c5c5c1f4d77daad92 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-f07d74847e620dfa26d8df40ea4680814af9bba381b3a57a7b6ed76ad49d85f8.yml +openapi_spec_hash: e3553dc2abf2afd4368b736bcc32a289 config_hash: bad49c3bf949d5168ec3896bedff253a diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py index a4d32fca..332fb672 100644 --- a/src/gradient/resources/agents/agents.py +++ b/src/gradient/resources/agents/agents.py @@ -184,12 +184,14 @@ def create( description: str | Omit = omit, instruction: str | Omit = omit, knowledge_base_uuid: SequenceNotStr[str] | Omit = omit, + model_provider_key_uuid: str | Omit = omit, model_uuid: str | Omit = omit, name: str | Omit = omit, openai_key_uuid: str | Omit = omit, project_id: str | Omit = omit, region: str | Omit = omit, tags: SequenceNotStr[str] | Omit = omit, + workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -226,6 +228,8 @@ def create( tags: Agent tag to organize related resources + workspace_uuid: Identifier for the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -244,12 +248,14 @@ def create( "description": description, "instruction": instruction, "knowledge_base_uuid": knowledge_base_uuid, + "model_provider_key_uuid": model_provider_key_uuid, "model_uuid": model_uuid, "name": name, "openai_key_uuid": openai_key_uuid, "project_id": project_id, "region": region, "tags": tags, + "workspace_uuid": workspace_uuid, }, agent_create_params.AgentCreateParams, ), @@ -301,12 +307,14 @@ def update( path_uuid: str, *, agent_log_insights_enabled: bool | Omit = omit, + allowed_domains: SequenceNotStr[str] | Omit = omit, anthropic_key_uuid: str | Omit = omit, conversation_logs_enabled: bool | Omit = omit, description: str | Omit = omit, instruction: str | Omit = omit, k: int | Omit = omit, max_tokens: int | Omit = omit, + model_provider_key_uuid: str | Omit = omit, model_uuid: str | Omit = omit, name: str | Omit = omit, openai_key_uuid: str | Omit = omit, @@ -330,6 +338,9 @@ def update( response body is a JSON object containing the agent. Args: + allowed_domains: Optional list of allowed domains for the chatbot - Must use fully qualified + domain name (FQDN) such as https://example.com + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models conversation_logs_enabled: Optional update of conversation logs enabled @@ -347,6 +358,8 @@ def update( or output, set as a number between 1 and 512. This determines the length of each response. + model_provider_key_uuid: Optional Model Provider uuid for use with provider models + model_uuid: Identifier for the foundation model. name: Agent name @@ -390,12 +403,14 @@ def update( body=maybe_transform( { "agent_log_insights_enabled": agent_log_insights_enabled, + "allowed_domains": allowed_domains, "anthropic_key_uuid": anthropic_key_uuid, "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, "max_tokens": max_tokens, + "model_provider_key_uuid": model_provider_key_uuid, "model_uuid": model_uuid, "name": name, "openai_key_uuid": openai_key_uuid, @@ -680,12 +695,14 @@ async def create( description: str | Omit = omit, instruction: str | Omit = omit, knowledge_base_uuid: SequenceNotStr[str] | Omit = omit, + model_provider_key_uuid: str | Omit = omit, model_uuid: str | Omit = omit, name: str | Omit = omit, openai_key_uuid: str | Omit = omit, project_id: str | Omit = omit, region: str | Omit = omit, tags: SequenceNotStr[str] | Omit = omit, + workspace_uuid: str | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -722,6 +739,8 @@ async def create( tags: Agent tag to organize related resources + workspace_uuid: Identifier for the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -740,12 +759,14 @@ async def create( "description": description, "instruction": instruction, "knowledge_base_uuid": knowledge_base_uuid, + "model_provider_key_uuid": model_provider_key_uuid, "model_uuid": model_uuid, "name": name, "openai_key_uuid": openai_key_uuid, "project_id": project_id, "region": region, "tags": tags, + "workspace_uuid": workspace_uuid, }, agent_create_params.AgentCreateParams, ), @@ -797,12 +818,14 @@ async def update( path_uuid: str, *, agent_log_insights_enabled: bool | Omit = omit, + allowed_domains: SequenceNotStr[str] | Omit = omit, anthropic_key_uuid: str | Omit = omit, conversation_logs_enabled: bool | Omit = omit, description: str | Omit = omit, instruction: str | Omit = omit, k: int | Omit = omit, max_tokens: int | Omit = omit, + model_provider_key_uuid: str | Omit = omit, model_uuid: str | Omit = omit, name: str | Omit = omit, openai_key_uuid: str | Omit = omit, @@ -826,6 +849,9 @@ async def update( response body is a JSON object containing the agent. Args: + allowed_domains: Optional list of allowed domains for the chatbot - Must use fully qualified + domain name (FQDN) such as https://example.com + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models conversation_logs_enabled: Optional update of conversation logs enabled @@ -843,6 +869,8 @@ async def update( or output, set as a number between 1 and 512. This determines the length of each response. + model_provider_key_uuid: Optional Model Provider uuid for use with provider models + model_uuid: Identifier for the foundation model. 
name: Agent name @@ -886,12 +914,14 @@ async def update( body=await async_maybe_transform( { "agent_log_insights_enabled": agent_log_insights_enabled, + "allowed_domains": allowed_domains, "anthropic_key_uuid": anthropic_key_uuid, "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, "max_tokens": max_tokens, + "model_provider_key_uuid": model_provider_key_uuid, "model_uuid": model_uuid, "name": name, "openai_key_uuid": openai_key_uuid, diff --git a/src/gradient/types/agent_create_params.py b/src/gradient/types/agent_create_params.py index db84a258..343c5d70 100644 --- a/src/gradient/types/agent_create_params.py +++ b/src/gradient/types/agent_create_params.py @@ -28,6 +28,8 @@ class AgentCreateParams(TypedDict, total=False): knowledge_base_uuid: SequenceNotStr[str] """Ids of the knowledge base(s) to attach to the agent""" + model_provider_key_uuid: str + model_uuid: str """Identifier for the foundation model.""" @@ -45,3 +47,6 @@ class AgentCreateParams(TypedDict, total=False): tags: SequenceNotStr[str] """Agent tag to organize related resources""" + + workspace_uuid: str + """Identifier for the workspace""" diff --git a/src/gradient/types/agent_list_response.py b/src/gradient/types/agent_list_response.py index 7a64c66e..c461f152 100644 --- a/src/gradient/types/agent_list_response.py +++ b/src/gradient/types/agent_list_response.py @@ -24,6 +24,8 @@ class AgentChatbot(BaseModel): + allowed_domains: Optional[List[str]] = None + button_background_color: Optional[str] = None logo: Optional[str] = None @@ -61,6 +63,7 @@ class AgentDeployment(BaseModel): "STATUS_UNDEPLOYING", "STATUS_UNDEPLOYMENT_FAILED", "STATUS_DELETED", + "STATUS_BUILDING", ] ] = None diff --git a/src/gradient/types/agent_update_params.py b/src/gradient/types/agent_update_params.py index 75c30cba..5026beaa 100644 --- a/src/gradient/types/agent_update_params.py +++ b/src/gradient/types/agent_update_params.py @@ -14,6 +14,12 @@ class AgentUpdateParams(TypedDict, total=False): agent_log_insights_enabled: bool + allowed_domains: SequenceNotStr[str] + """ + Optional list of allowed domains for the chatbot - Must use fully qualified + domain name (FQDN) such as https://example.com + """ + anthropic_key_uuid: str """Optional anthropic key uuid for use with anthropic models""" @@ -41,6 +47,9 @@ class AgentUpdateParams(TypedDict, total=False): response. 
""" + model_provider_key_uuid: str + """Optional Model Provider uuid for use with provider models""" + model_uuid: str """Identifier for the foundation model.""" diff --git a/src/gradient/types/api_agent.py b/src/gradient/types/api_agent.py index abfbe828..f52e44c8 100644 --- a/src/gradient/types/api_agent.py +++ b/src/gradient/types/api_agent.py @@ -6,6 +6,8 @@ from datetime import datetime from typing_extensions import Literal +from pydantic import Field as FieldInfo + from .._models import BaseModel from .api_agent_model import APIAgentModel from .api_knowledge_base import APIKnowledgeBase @@ -24,6 +26,7 @@ "Function", "Guardrail", "LoggingConfig", + "ModelProviderKey", "Template", "TemplateGuardrail", ] @@ -35,6 +38,8 @@ class APIKey(BaseModel): class Chatbot(BaseModel): + allowed_domains: Optional[List[str]] = None + button_background_color: Optional[str] = None logo: Optional[str] = None @@ -72,6 +77,7 @@ class Deployment(BaseModel): "STATUS_UNDEPLOYING", "STATUS_UNDEPLOYMENT_FAILED", "STATUS_DELETED", + "STATUS_BUILDING", ] ] = None @@ -186,6 +192,33 @@ class LoggingConfig(BaseModel): """Name of the log stream""" +class ModelProviderKey(BaseModel): + api_key_uuid: Optional[str] = None + """API key ID""" + + created_at: Optional[datetime] = None + """Key creation date""" + + created_by: Optional[str] = None + """Created by user id from DO""" + + deleted_at: Optional[datetime] = None + """Key deleted date""" + + models: Optional[List[APIAgentModel]] = None + """Models supported by the openAI api key""" + + name: Optional[str] = None + """Name of the key""" + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + """Key last updated date""" + + class TemplateGuardrail(BaseModel): priority: Optional[int] = None """Priority of the guardrail""" @@ -311,6 +344,8 @@ class APIAgent(BaseModel): model: Optional[APIAgentModel] = None """Description of a Model""" + api_model_provider_key: Optional[ModelProviderKey] = FieldInfo(alias="model_provider_key", default=None) + name: Optional[str] = None """Agent name""" @@ -372,6 +407,11 @@ class APIAgent(BaseModel): version_hash: Optional[str] = None """The latest version of the agent""" + vpc_egress_ips: Optional[List[str]] = None + """VPC Egress IPs""" + + vpc_uuid: Optional[str] = None + workspace: Optional["APIWorkspace"] = None diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py index e40bd598..4dc42098 100644 --- a/src/gradient/types/knowledge_base_create_params.py +++ b/src/gradient/types/knowledge_base_create_params.py @@ -11,7 +11,7 @@ from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceDropboxDataSource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceDropboxDataSource", "DatasourceGoogleDriveDataSource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -63,6 +63,17 @@ class DatasourceDropboxDataSource(TypedDict, total=False): """ +class DatasourceGoogleDriveDataSource(TypedDict, total=False): + folder_id: str + + refresh_token: str + """Refresh token. + + you can obrain a refresh token by following the oauth2 flow. see + /v2/gen-ai/oauth2/google/tokens for reference. 
+ """ + + class Datasource(TypedDict, total=False): aws_data_source: AwsDataSourceParam """AWS S3 Data Source""" @@ -79,6 +90,9 @@ class Datasource(TypedDict, total=False): file_upload_data_source: APIFileUploadDataSourceParam """File to upload as data source for knowledge base.""" + google_drive_data_source: DatasourceGoogleDriveDataSource + """Google Drive Data Source""" + item_path: str spaces_data_source: APISpacesDataSourceParam diff --git a/src/gradient/types/knowledge_bases/api_indexed_data_source.py b/src/gradient/types/knowledge_bases/api_indexed_data_source.py index 151b29de..3f011582 100644 --- a/src/gradient/types/knowledge_bases/api_indexed_data_source.py +++ b/src/gradient/types/knowledge_bases/api_indexed_data_source.py @@ -48,6 +48,7 @@ class APIIndexedDataSource(BaseModel): "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", "DATA_SOURCE_STATUS_NOT_UPDATED", "DATA_SOURCE_STATUS_FAILED", + "DATA_SOURCE_STATUS_CANCELLED", ] ] = None diff --git a/src/gradient/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py index 312e465c..93124cf8 100644 --- a/src/gradient/types/knowledge_bases/api_indexing_job.py +++ b/src/gradient/types/knowledge_bases/api_indexing_job.py @@ -5,6 +5,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from .api_indexed_data_source import APIIndexedDataSource __all__ = ["APIIndexingJob"] @@ -16,10 +17,16 @@ class APIIndexingJob(BaseModel): created_at: Optional[datetime] = None """Creation date / time""" + data_source_jobs: Optional[List[APIIndexedDataSource]] = None + """Details on Data Sources included in the Indexing Job""" + data_source_uuids: Optional[List[str]] = None finished_at: Optional[datetime] = None + is_report_available: Optional[bool] = None + """Boolean value to determine if the indexing job details are available""" + knowledge_base_uuid: Optional[str] = None """Knowledge base id""" @@ -50,7 +57,7 @@ class APIIndexingJob(BaseModel): ] = None tokens: Optional[int] = None - """Number of tokens""" + """Number of tokens [This field is deprecated]""" total_datasources: Optional[int] = None """Number of datasources being indexed""" @@ -61,9 +68,15 @@ class APIIndexingJob(BaseModel): total_items_indexed: Optional[str] = None """Total Items Indexed""" + total_items_removed: Optional[str] = None + """Total Items Removed""" + total_items_skipped: Optional[str] = None """Total Items Skipped""" + total_tokens: Optional[str] = None + """Total Tokens Consumed By the Indexing Job""" + updated_at: Optional[datetime] = None """Last modified""" diff --git a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py index ed370eb5..223797c7 100644 --- a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py @@ -10,7 +10,7 @@ from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource -__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource", "DropboxDataSource"] +__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource", "DropboxDataSource", "GoogleDriveDataSource"] class AwsDataSource(BaseModel): @@ -27,6 +27,13 @@ class DropboxDataSource(BaseModel): folder: Optional[str] = None +class GoogleDriveDataSource(BaseModel): + folder_id: Optional[str] = None + + folder_name: Optional[str] = None + """Name of the selected folder if available""" + + class 
APIKnowledgeBaseDataSource(BaseModel): aws_data_source: Optional[AwsDataSource] = None """AWS S3 Data Source for Display""" @@ -43,6 +50,9 @@ class APIKnowledgeBaseDataSource(BaseModel): file_upload_data_source: Optional[APIFileUploadDataSource] = None """File to upload as data source for knowledge base.""" + google_drive_data_source: Optional[GoogleDriveDataSource] = None + """Google Drive Data Source for Display""" + item_path: Optional[str] = None """Path of folder or object in bucket - Deprecated, moved to data_source_details""" diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py index 4690c607..63c9111a 100644 --- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py +++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel @@ -24,3 +24,6 @@ class APIWebCrawlerDataSource(BaseModel): embed_media: Optional[bool] = None """Whether to ingest and index media (images, etc.) on web pages.""" + + exclude_tags: Optional[List[str]] = None + """Tags to exclude from web pages while web crawling""" diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py index 2345ed3a..17988e73 100644 --- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py +++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py @@ -4,6 +4,8 @@ from typing_extensions import Literal, TypedDict +from ..._types import SequenceNotStr + __all__ = ["APIWebCrawlerDataSourceParam"] @@ -23,3 +25,6 @@ class APIWebCrawlerDataSourceParam(TypedDict, total=False): embed_media: bool """Whether to ingest and index media (images, etc.)
on web pages.""" + + exclude_tags: SequenceNotStr[str] + """Tags to exclude from web pages while web crawling""" diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 4214f880..ca721d93 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -52,6 +52,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, + "exclude_tags": ["example string"], }, ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -273,6 +274,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, + "exclude_tags": ["example string"], }, ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2069d2d8..bf495274 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -39,12 +39,14 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: description='"My Agent Description"', instruction='"You are an agent who thinks deeply about the world"', knowledge_base_uuid=["example string"], + model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"', model_uuid='"12345678-1234-1234-1234-123456789012"', name='"My Agent"', openai_key_uuid='"12345678-1234-1234-1234-123456789012"', project_id='"12345678-1234-1234-1234-123456789012"', region='"tor1"', tags=["example string"], + workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -126,12 +128,14 @@ def test_method_update_with_all_params(self, client: Gradient) -> None: agent = client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_log_insights_enabled=True, + allowed_domains=["example string"], anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', conversation_logs_enabled=True, description='"My Agent Description"', instruction='"You are an agent who thinks deeply about the world"', k=5, max_tokens=100, + model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"', model_uuid='"12345678-1234-1234-1234-123456789012"', name='"My New Agent Name"', openai_key_uuid='"12345678-1234-1234-1234-123456789012"', @@ -383,12 +387,14 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) description='"My Agent Description"', instruction='"You are an agent who thinks deeply about the world"', knowledge_base_uuid=["example string"], + model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"', model_uuid='"12345678-1234-1234-1234-123456789012"', name='"My Agent"', openai_key_uuid='"12345678-1234-1234-1234-123456789012"', project_id='"12345678-1234-1234-1234-123456789012"', region='"tor1"', tags=["example string"], + workspace_uuid="123e4567-e89b-12d3-a456-426614174000", ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -470,12 +476,14 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient) agent = await async_client.agents.update( path_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_log_insights_enabled=True, + allowed_domains=["example string"], anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
conversation_logs_enabled=True, description='"My Agent Description"', instruction='"You are an agent who thinks deeply about the world"', k=5, max_tokens=100, + model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"', model_uuid='"12345678-1234-1234-1234-123456789012"', name='"My New Agent Name"', openai_key_uuid='"12345678-1234-1234-1234-123456789012"', diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 62965775..3e02f057 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -54,6 +54,10 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: "size_in_bytes": "12345", "stored_object_key": "example string", }, + "google_drive_data_source": { + "folder_id": "123e4567-e89b-12d3-a456-426614174000", + "refresh_token": "example string", + }, "item_path": "example string", "spaces_data_source": { "bucket_name": "example name", @@ -64,6 +68,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None: "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, + "exclude_tags": ["example string"], }, } ], @@ -312,6 +317,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) "size_in_bytes": "12345", "stored_object_key": "example string", }, + "google_drive_data_source": { + "folder_id": "123e4567-e89b-12d3-a456-426614174000", + "refresh_token": "example string", + }, "item_path": "example string", "spaces_data_source": { "bucket_name": "example name", @@ -322,6 +331,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient) "base_url": "example string", "crawling_option": "UNKNOWN", "embed_media": True, + "exclude_tags": ["example string"], }, } ], From 960d4cd71ee2f97c1e260d49b7b2a2f7d6184b98 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 04:11:02 +0000 Subject: [PATCH 191/200] chore(internal/tests): avoid race condition with implicit client cleanup --- tests/test_client.py | 373 +++++++++++++++++++++++-------------------- 1 file changed, 204 insertions(+), 169 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index 681d87a1..7b421637 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -60,51 +60,49 @@ def _get_open_connections(client: Gradient | AsyncGradient) -> int: class TestGradient: - client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - @pytest.mark.respx(base_url=base_url) - def test_raw_response(self, respx_mock: MockRouter) -> None: + def test_raw_response(self, respx_mock: MockRouter, client: Gradient) -> None: respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = self.client.post("/foo", cast_to=httpx.Response) + response = client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} @pytest.mark.respx(base_url=base_url) - def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: + def test_raw_response_for_binary(self, respx_mock: MockRouter, client: Gradient) -> None: respx_mock.post("/foo").mock( return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') ) - response = self.client.post("/foo", cast_to=httpx.Response) + response = client.post("/foo", cast_to=httpx.Response) assert 
response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} - def test_copy(self) -> None: - copied = self.client.copy() - assert id(copied) != id(self.client) + def test_copy(self, client: Gradient) -> None: + copied = client.copy() + assert id(copied) != id(client) - copied = self.client.copy(access_token="another My Access Token") + copied = client.copy(access_token="another My Access Token") assert copied.access_token == "another My Access Token" - assert self.client.access_token == "My Access Token" + assert client.access_token == "My Access Token" - def test_copy_default_options(self) -> None: + def test_copy_default_options(self, client: Gradient) -> None: # options that have a default are overridden correctly - copied = self.client.copy(max_retries=7) + copied = client.copy(max_retries=7) assert copied.max_retries == 7 - assert self.client.max_retries == 2 + assert client.max_retries == 2 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 assert copied.max_retries == 7 # timeout - assert isinstance(self.client.timeout, httpx.Timeout) - copied = self.client.copy(timeout=None) + assert isinstance(client.timeout, httpx.Timeout) + copied = client.copy(timeout=None) assert copied.timeout is None - assert isinstance(self.client.timeout, httpx.Timeout) + assert isinstance(client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: client = Gradient( @@ -142,6 +140,7 @@ def test_copy_default_headers(self) -> None: match="`default_headers` and `set_default_headers` arguments are mutually exclusive", ): client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + client.close() def test_copy_default_query(self) -> None: client = Gradient( @@ -179,13 +178,15 @@ def test_copy_default_query(self) -> None: ): client.copy(set_default_query={}, default_query={"foo": "Bar"}) - def test_copy_signature(self) -> None: + client.close() + + def test_copy_signature(self, client: Gradient) -> None: # ensure the same parameters that can be passed to the client are defined in the `.copy()` method init_signature = inspect.signature( # mypy doesn't like that we access the `__init__` property. - self.client.__init__, # type: ignore[misc] + client.__init__, # type: ignore[misc] ) - copy_signature = inspect.signature(self.client.copy) + copy_signature = inspect.signature(client.copy) exclude_params = {"transport", "proxies", "_strict_response_validation"} for name in init_signature.parameters.keys(): @@ -196,12 +197,12 @@ def test_copy_signature(self) -> None: assert copy_param is not None, f"copy() signature is missing the {name} param" @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") - def test_copy_build_request(self) -> None: + def test_copy_build_request(self, client: Gradient) -> None: options = FinalRequestOptions(method="get", url="/foo") def build_request(options: FinalRequestOptions) -> None: - client = self.client.copy() - client._build_request(options) + client_copy = client.copy() + client_copy._build_request(options) # ensure that the machinery is warmed up before tracing starts. 
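# the leak check below diffs tracemalloc snapshots, so any one-time allocations (imports, lazily built client state) must happen before tracing starts or they would be misreported as per-copy leaks.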
build_request(options) @@ -258,14 +259,12 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic print(frame) raise AssertionError() - def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + def test_request_timeout(self, client: Gradient) -> None: + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT - request = self.client._build_request( - FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) - ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(100.0) @@ -278,6 +277,8 @@ def test_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) + client.close() + def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: @@ -289,6 +290,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) + client.close() + # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: client = Gradient( @@ -299,6 +302,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT + client.close() + # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = Gradient( @@ -309,6 +314,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default + client.close() + async def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): async with httpx.AsyncClient() as http_client: @@ -320,17 +327,17 @@ async def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = Gradient( + test_client = Gradient( base_url=base_url, access_token=access_token, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = Gradient( + test_client2 = Gradient( base_url=base_url, access_token=access_token, _strict_response_validation=True, @@ -339,10 +346,13 @@ def test_default_headers_option(self) -> None: "X-Stainless-Lang": "my-overriding-header", }, ) - request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" + test_client.close() + test_client2.close() + def test_validate_headers(self) -> None: client = Gradient(base_url=base_url, access_token=access_token, 
_strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -383,8 +393,10 @@ def test_default_query_option(self) -> None: url = httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} - def test_request_extra_json(self) -> None: - request = self.client._build_request( + client.close() + + def test_request_extra_json(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -395,7 +407,7 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -406,7 +418,7 @@ def test_request_extra_json(self) -> None: assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -417,8 +429,8 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} - def test_request_extra_headers(self) -> None: - request = self.client._build_request( + def test_request_extra_headers(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -428,7 +440,7 @@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", @@ -439,8 +451,8 @@ def test_request_extra_headers(self) -> None: ) assert request.headers.get("X-Bar") == "false" - def test_request_extra_query(self) -> None: - request = self.client._build_request( + def test_request_extra_query(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -453,7 +465,7 @@ def test_request_extra_query(self) -> None: assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -467,7 +479,7 @@ def test_request_extra_query(self) -> None: assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -510,7 +522,7 @@ def test_multipart_repeating_array(self, client: Gradient) -> None: ] @pytest.mark.respx(base_url=base_url) - def test_basic_union_response(self, respx_mock: MockRouter) -> None: + def test_basic_union_response(self, respx_mock: MockRouter, client: Gradient) -> None: class Model1(BaseModel): name: str @@ -519,12 +531,12 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" @pytest.mark.respx(base_url=base_url) - def 
test_union_response_different_types(self, respx_mock: MockRouter) -> None: + def test_union_response_different_types(self, respx_mock: MockRouter, client: Gradient) -> None: """Union of objects with the same field name using a different type""" class Model1(BaseModel): @@ -535,18 +547,18 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter, client: Gradient) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -562,7 +574,7 @@ class Model(BaseModel): ) ) - response = self.client.get("/foo", cast_to=Model) + response = client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 @@ -576,6 +588,8 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" + client.close() + def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = Gradient(access_token=access_token, _strict_response_validation=True) @@ -607,6 +621,7 @@ def test_base_url_trailing_slash(self, client: Gradient) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + client.close() @pytest.mark.parametrize( "client", @@ -634,6 +649,7 @@ def test_base_url_no_trailing_slash(self, client: Gradient) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + client.close() @pytest.mark.parametrize( "client", @@ -661,35 +677,36 @@ def test_absolute_request_url(self, client: Gradient) -> None: ), ) assert request.url == "https://myapi.com/foo" + client.close() def test_copied_client_does_not_close_http(self) -> None: - client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - assert not client.is_closed() + test_client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) + assert not test_client.is_closed() - copied = client.copy() - assert copied is not client + copied = test_client.copy() + assert copied is not test_client del copied - assert not client.is_closed() + assert not test_client.is_closed() def test_client_context_manager(self) -> None: - client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - with client as c2: - assert c2 is client + test_client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) + with test_client as c2: + assert c2 is test_client assert not c2.is_closed() - assert not client.is_closed() - assert client.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() @pytest.mark.respx(base_url=base_url) - def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + def 
test_client_response_validation_error(self, respx_mock: MockRouter, client: Gradient) -> None: class Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: - self.client.get("/foo", cast_to=Model) + client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @@ -703,13 +720,13 @@ def test_client_max_retries_validation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - def test_default_stream_cls(self, respx_mock: MockRouter) -> None: + def test_default_stream_cls(self, respx_mock: MockRouter, client: Gradient) -> None: class Model(BaseModel): name: str respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - stream = self.client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]) + stream = client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]) assert isinstance(stream, Stream) stream.response.close() @@ -725,11 +742,14 @@ class Model(BaseModel): with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=False) + non_strict_client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=False) - response = client.get("/foo", cast_to=Model) + response = non_strict_client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] + strict_client.close() + non_strict_client.close() + @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ @@ -752,9 +772,9 @@ class Model(BaseModel): ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) - def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = Gradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - + def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float, client: Gradient + ) -> None: headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) @@ -776,7 +796,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien model="llama3-8b-instruct", ).__enter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(client) == 0 @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @@ -793,7 +813,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client ], model="llama3-8b-instruct", ).__enter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @@ -921,83 +941,77 @@ def test_default_client_creation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - def test_follow_redirects(self, respx_mock: MockRouter) -> None: + def test_follow_redirects(self, respx_mock: MockRouter, client: Gradient) -> None: # Test that the default follow_redirects=True allows following redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) 
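# the redirect target needs its own mock: with follow_redirects enabled, httpx turns the 302 into a follow-up GET against /redirected.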
respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) - response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + response = client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) assert response.status_code == 200 assert response.json() == {"status": "ok"} @pytest.mark.respx(base_url=base_url) - def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: Gradient) -> None: # Test that follow_redirects=False prevents following redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) with pytest.raises(APIStatusError) as exc_info: - self.client.post( - "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response - ) + client.post("/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response) assert exc_info.value.response.status_code == 302 assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" class TestAsyncGradient: - client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_raw_response(self, respx_mock: MockRouter) -> None: + async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.post("/foo", cast_to=httpx.Response) + response = await async_client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: + async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: respx_mock.post("/foo").mock( return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') ) - response = await self.client.post("/foo", cast_to=httpx.Response) + response = await async_client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} - def test_copy(self) -> None: - copied = self.client.copy() - assert id(copied) != id(self.client) + def test_copy(self, async_client: AsyncGradient) -> None: + copied = async_client.copy() + assert id(copied) != id(async_client) - copied = self.client.copy(access_token="another My Access Token") + copied = async_client.copy(access_token="another My Access Token") assert copied.access_token == "another My Access Token" - assert self.client.access_token == "My Access Token" + assert async_client.access_token == "My Access Token" - def test_copy_default_options(self) -> None: + def test_copy_default_options(self, async_client: AsyncGradient) -> None: # options that have a default are overridden correctly - copied = self.client.copy(max_retries=7) + copied = async_client.copy(max_retries=7) assert copied.max_retries == 7 - assert self.client.max_retries == 2 + assert async_client.max_retries == 2 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 assert copied.max_retries == 7 # timeout - assert isinstance(self.client.timeout, 
httpx.Timeout) - copied = self.client.copy(timeout=None) + assert isinstance(async_client.timeout, httpx.Timeout) + copied = async_client.copy(timeout=None) assert copied.timeout is None - assert isinstance(self.client.timeout, httpx.Timeout) + assert isinstance(async_client.timeout, httpx.Timeout) - def test_copy_default_headers(self) -> None: + async def test_copy_default_headers(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, @@ -1033,8 +1047,9 @@ def test_copy_default_headers(self) -> None: match="`default_headers` and `set_default_headers` arguments are mutually exclusive", ): client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + await client.close() - def test_copy_default_query(self) -> None: + async def test_copy_default_query(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, _strict_response_validation=True, default_query={"foo": "bar"} ) @@ -1070,13 +1085,15 @@ def test_copy_default_query(self) -> None: ): client.copy(set_default_query={}, default_query={"foo": "Bar"}) - def test_copy_signature(self) -> None: + await client.close() + + def test_copy_signature(self, async_client: AsyncGradient) -> None: # ensure the same parameters that can be passed to the client are defined in the `.copy()` method init_signature = inspect.signature( # mypy doesn't like that we access the `__init__` property. - self.client.__init__, # type: ignore[misc] + async_client.__init__, # type: ignore[misc] ) - copy_signature = inspect.signature(self.client.copy) + copy_signature = inspect.signature(async_client.copy) exclude_params = {"transport", "proxies", "_strict_response_validation"} for name in init_signature.parameters.keys(): @@ -1087,12 +1104,12 @@ def test_copy_signature(self) -> None: assert copy_param is not None, f"copy() signature is missing the {name} param" @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") - def test_copy_build_request(self) -> None: + def test_copy_build_request(self, async_client: AsyncGradient) -> None: options = FinalRequestOptions(method="get", url="/foo") def build_request(options: FinalRequestOptions) -> None: - client = self.client.copy() - client._build_request(options) + client_copy = async_client.copy() + client_copy._build_request(options) # ensure that the machinery is warmed up before tracing starts. 
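# same rationale as the sync variant above: warm up before tracing so the tracemalloc snapshot diff only reflects allocations made per copy.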
build_request(options) @@ -1149,12 +1166,12 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic print(frame) raise AssertionError() - async def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + async def test_request_timeout(self, async_client: AsyncGradient) -> None: + request = async_client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT - request = self.client._build_request( + request = async_client._build_request( FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore @@ -1169,6 +1186,8 @@ async def test_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) + await client.close() + async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: @@ -1180,6 +1199,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) + await client.close() + # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: client = AsyncGradient( @@ -1190,6 +1211,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT + await client.close() + # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncGradient( @@ -1200,6 +1223,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default + await client.close() + def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): with httpx.Client() as http_client: @@ -1210,18 +1235,18 @@ def test_invalid_http_client(self) -> None: http_client=cast(Any, http_client), ) - def test_default_headers_option(self) -> None: - client = AsyncGradient( + async def test_default_headers_option(self) -> None: + test_client = AsyncGradient( base_url=base_url, access_token=access_token, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = AsyncGradient( + test_client2 = AsyncGradient( base_url=base_url, access_token=access_token, _strict_response_validation=True, @@ -1230,10 +1255,13 @@ def test_default_headers_option(self) -> None: "X-Stainless-Lang": "my-overriding-header", }, ) - request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" + await test_client.close() + await 
test_client2.close() + def test_validate_headers(self) -> None: client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1253,7 +1281,7 @@ def test_validate_headers(self) -> None: ) assert request2.headers.get("Authorization") is None - def test_default_query_option(self) -> None: + async def test_default_query_option(self) -> None: client = AsyncGradient( base_url=base_url, access_token=access_token, @@ -1274,8 +1302,10 @@ def test_default_query_option(self) -> None: url = httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} - def test_request_extra_json(self) -> None: - request = self.client._build_request( + await client.close() + + def test_request_extra_json(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1286,7 +1316,7 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1297,7 +1327,7 @@ def test_request_extra_json(self) -> None: assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1308,8 +1338,8 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} - def test_request_extra_headers(self) -> None: - request = self.client._build_request( + def test_request_extra_headers(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1319,7 +1349,7 @@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1330,8 +1360,8 @@ def test_request_extra_headers(self) -> None: ) assert request.headers.get("X-Bar") == "false" - def test_request_extra_query(self) -> None: - request = self.client._build_request( + def test_request_extra_query(self, client: Gradient) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1344,7 +1374,7 @@ def test_request_extra_query(self) -> None: assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1358,7 +1388,7 @@ def test_request_extra_query(self) -> None: assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1401,7 +1431,7 @@ def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None: ] @pytest.mark.respx(base_url=base_url) - async def test_basic_union_response(self, respx_mock: MockRouter) -> None: + async def test_basic_union_response(self, respx_mock: MockRouter, 
async_client: AsyncGradient) -> None: class Model1(BaseModel): name: str @@ -1410,12 +1440,12 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" @pytest.mark.respx(base_url=base_url) - async def test_union_response_different_types(self, respx_mock: MockRouter) -> None: + async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: """Union of objects with the same field name using a different type""" class Model1(BaseModel): @@ -1426,18 +1456,20 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + async def test_non_application_json_content_type_for_json_data( + self, respx_mock: MockRouter, async_client: AsyncGradient + ) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -1453,11 +1485,11 @@ class Model(BaseModel): ) ) - response = await self.client.get("/foo", cast_to=Model) + response = await async_client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 - def test_base_url_setter(self) -> None: + async def test_base_url_setter(self) -> None: client = AsyncGradient( base_url="https://example.com/from_init", access_token=access_token, _strict_response_validation=True ) @@ -1467,7 +1499,9 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" - def test_base_url_env(self) -> None: + await client.close() + + async def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = AsyncGradient(access_token=access_token, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @@ -1489,7 +1523,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: + async def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1498,6 +1532,7 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + await client.close() @pytest.mark.parametrize( "client", @@ -1516,7 +1551,7 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: + async def test_base_url_no_trailing_slash(self, client: 
AsyncGradient) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1525,6 +1560,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + await client.close() @pytest.mark.parametrize( "client", @@ -1543,7 +1579,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: AsyncGradient) -> None: + async def test_absolute_request_url(self, client: AsyncGradient) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1552,37 +1588,37 @@ def test_absolute_request_url(self, client: AsyncGradient) -> None: ), ) assert request.url == "https://myapi.com/foo" + await client.close() async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - assert not client.is_closed() + test_client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) + assert not test_client.is_closed() - copied = client.copy() - assert copied is not client + copied = test_client.copy() + assert copied is not test_client del copied await asyncio.sleep(0.2) - assert not client.is_closed() + assert not test_client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - async with client as c2: - assert c2 is client + test_client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) + async with test_client as c2: + assert c2 is test_client assert not c2.is_closed() - assert not client.is_closed() - assert client.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: class Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: - await self.client.get("/foo", cast_to=Model) + await async_client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @@ -1596,19 +1632,17 @@ async def test_client_max_retries_validation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_default_stream_cls(self, respx_mock: MockRouter) -> None: + async def test_default_stream_cls(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: class Model(BaseModel): name: str respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - stream = await self.client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]) + stream = await async_client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]) assert isinstance(stream, AsyncStream) await stream.response.aclose() @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str @@ -1620,11 +1654,16 @@ class Model(BaseModel): with pytest.raises(APIResponseValidationError): await 
strict_client.get("/foo", cast_to=Model) - client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=False) + non_strict_client = AsyncGradient( + base_url=base_url, access_token=access_token, _strict_response_validation=False + ) - response = await client.get("/foo", cast_to=Model) + response = await non_strict_client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] + await strict_client.close() + await non_strict_client.close() + @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ @@ -1647,13 +1686,12 @@ class Model(BaseModel): ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) - @pytest.mark.asyncio - async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncGradient(base_url=base_url, access_token=access_token, _strict_response_validation=True) - + async def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncGradient + ) -> None: headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) - calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @@ -1674,7 +1712,7 @@ async def test_retrying_timeout_errors_doesnt_leak( model="llama3-8b-instruct", ).__aenter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(async_client) == 0 @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @@ -1693,12 +1731,11 @@ async def test_retrying_status_errors_doesnt_leak( ], model="llama3-8b-instruct", ).__aenter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(async_client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) async def test_retries_taken( self, @@ -1738,7 +1775,6 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio async def test_omit_retry_count_header( self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter ) -> None: @@ -1771,7 +1807,6 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio async def test_overwrite_retry_count_header( self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter ) -> None: @@ -1828,26 +1863,26 @@ async def test_default_client_creation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - async def test_follow_redirects(self, respx_mock: MockRouter) -> None: + 
async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: # Test that the default follow_redirects=True allows following redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) - response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + response = await async_client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) assert response.status_code == 200 assert response.json() == {"status": "ok"} @pytest.mark.respx(base_url=base_url) - async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncGradient) -> None: # Test that follow_redirects=False prevents following redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) with pytest.raises(APIStatusError) as exc_info: - await self.client.post( + await async_client.post( "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response ) From 9c407bb4b0e0e999aaf2ea367b2733ca2a09fcc6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 18:49:30 +0000 Subject: [PATCH 192/200] feat(api): include indexing jobs --- .stats.yml | 4 +- api.md | 57 ++ src/gradient/_client.py | 50 +- src/gradient/resources/__init__.py | 14 + .../agents/evaluation_metrics/__init__.py | 14 + .../evaluation_metrics/evaluation_metrics.py | 32 + .../evaluation_metrics/scheduled_indexing.py | 377 +++++++++ .../knowledge_bases/indexing_jobs.py | 85 ++ .../knowledge_bases/knowledge_bases.py | 89 ++ src/gradient/resources/nfs/__init__.py | 33 + src/gradient/resources/nfs/nfs.py | 780 ++++++++++++++++++ src/gradient/resources/nfs/snapshots.py | 418 ++++++++++ src/gradient/types/__init__.py | 12 + .../agents/evaluation_metrics/__init__.py | 4 + .../scheduled_indexing_create_params.py | 22 + .../scheduled_indexing_create_response.py | 48 ++ .../scheduled_indexing_delete_response.py | 48 ++ .../scheduled_indexing_retrieve_response.py | 48 ++ ...wledge_base_list_indexing_jobs_response.py | 21 + .../types/knowledge_bases/__init__.py | 3 + ...dexing_job_retrieve_signed_url_response.py | 12 + src/gradient/types/nf_create_params.py | 23 + src/gradient/types/nf_create_response.py | 45 + src/gradient/types/nf_delete_params.py | 12 + .../types/nf_initiate_action_params.py | 47 ++ .../types/nf_initiate_action_response.py | 33 + src/gradient/types/nf_list_params.py | 12 + src/gradient/types/nf_list_response.py | 45 + src/gradient/types/nf_retrieve_params.py | 12 + src/gradient/types/nf_retrieve_response.py | 45 + src/gradient/types/nfs/__init__.py | 9 + .../types/nfs/snapshot_delete_params.py | 12 + .../types/nfs/snapshot_list_params.py | 18 + .../types/nfs/snapshot_list_response.py | 36 + .../types/nfs/snapshot_retrieve_params.py | 12 + .../types/nfs/snapshot_retrieve_response.py | 37 + .../test_scheduled_indexing.py | 274 ++++++ .../knowledge_bases/test_indexing_jobs.py | 85 ++ tests/api_resources/nfs/__init__.py | 1 + tests/api_resources/nfs/test_snapshots.py | 297 +++++++ tests/api_resources/test_knowledge_bases.py | 85 ++ tests/api_resources/test_nfs.py | 611 ++++++++++++++ 42 files changed, 3919 insertions(+), 3 deletions(-) create mode 100644 
src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py create mode 100644 src/gradient/resources/nfs/__init__.py create mode 100644 src/gradient/resources/nfs/nfs.py create mode 100644 src/gradient/resources/nfs/snapshots.py create mode 100644 src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py create mode 100644 src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py create mode 100644 src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py create mode 100644 src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py create mode 100644 src/gradient/types/knowledge_base_list_indexing_jobs_response.py create mode 100644 src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py create mode 100644 src/gradient/types/nf_create_params.py create mode 100644 src/gradient/types/nf_create_response.py create mode 100644 src/gradient/types/nf_delete_params.py create mode 100644 src/gradient/types/nf_initiate_action_params.py create mode 100644 src/gradient/types/nf_initiate_action_response.py create mode 100644 src/gradient/types/nf_list_params.py create mode 100644 src/gradient/types/nf_list_response.py create mode 100644 src/gradient/types/nf_retrieve_params.py create mode 100644 src/gradient/types/nf_retrieve_response.py create mode 100644 src/gradient/types/nfs/__init__.py create mode 100644 src/gradient/types/nfs/snapshot_delete_params.py create mode 100644 src/gradient/types/nfs/snapshot_list_params.py create mode 100644 src/gradient/types/nfs/snapshot_list_response.py create mode 100644 src/gradient/types/nfs/snapshot_retrieve_params.py create mode 100644 src/gradient/types/nfs/snapshot_retrieve_response.py create mode 100644 tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py create mode 100644 tests/api_resources/nfs/__init__.py create mode 100644 tests/api_resources/nfs/test_snapshots.py create mode 100644 tests/api_resources/test_nfs.py diff --git a/.stats.yml b/.stats.yml index f8627752..425463f6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 175 +configured_endpoints: 188 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-f07d74847e620dfa26d8df40ea4680814af9bba381b3a57a7b6ed76ad49d85f8.yml openapi_spec_hash: e3553dc2abf2afd4368b736bcc32a289 -config_hash: bad49c3bf949d5168ec3896bedff253a +config_hash: b712366a70c9d33e22d40eb601ca972f diff --git a/api.md b/api.md index a2325441..49135772 100644 --- a/api.md +++ b/api.md @@ -243,6 +243,24 @@ Methods: - client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(\*\*params) -> DropboxCreateTokensResponse +### ScheduledIndexing + +Types: + +```python +from gradient.types.agents.evaluation_metrics import ( + ScheduledIndexingCreateResponse, + ScheduledIndexingRetrieveResponse, + ScheduledIndexingDeleteResponse, +) +``` + +Methods: + +- client.agents.evaluation_metrics.scheduled_indexing.create(\*\*params) -> ScheduledIndexingCreateResponse +- client.agents.evaluation_metrics.scheduled_indexing.retrieve(knowledge_base_uuid) -> ScheduledIndexingRetrieveResponse +- client.agents.evaluation_metrics.scheduled_indexing.delete(uuid) -> ScheduledIndexingDeleteResponse + ## EvaluationRuns Types: @@ -825,6 +843,7 @@ from gradient.types import ( KnowledgeBaseUpdateResponse, KnowledgeBaseListResponse, KnowledgeBaseDeleteResponse, + KnowledgeBaseListIndexingJobsResponse, ) ``` @@ -835,6 +854,7 @@ Methods: - 
client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse - client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse - client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.list_indexing_jobs(knowledge_base_uuid) -> KnowledgeBaseListIndexingJobsResponse ## DataSources @@ -873,6 +893,7 @@ from gradient.types.knowledge_bases import ( IndexingJobRetrieveResponse, IndexingJobListResponse, IndexingJobRetrieveDataSourcesResponse, + IndexingJobRetrieveSignedURLResponse, IndexingJobUpdateCancelResponse, ) ``` @@ -883,6 +904,7 @@ Methods: - client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse - client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse - client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.retrieve_signed_url(indexing_job_uuid) -> IndexingJobRetrieveSignedURLResponse - client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Models @@ -982,3 +1004,38 @@ Methods: - client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse - client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse - client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse + +# Nfs + +Types: + +```python +from gradient.types import ( + NfCreateResponse, + NfRetrieveResponse, + NfListResponse, + NfInitiateActionResponse, +) +``` + +Methods: + +- client.nfs.create(\*\*params) -> NfCreateResponse +- client.nfs.retrieve(nfs_id, \*\*params) -> NfRetrieveResponse +- client.nfs.list(\*\*params) -> NfListResponse +- client.nfs.delete(nfs_id, \*\*params) -> None +- client.nfs.initiate_action(nfs_id, \*\*params) -> NfInitiateActionResponse + +## Snapshots + +Types: + +```python +from gradient.types.nfs import SnapshotRetrieveResponse, SnapshotListResponse +``` + +Methods: + +- client.nfs.snapshots.retrieve(nfs_snapshot_id, \*\*params) -> SnapshotRetrieveResponse +- client.nfs.snapshots.list(\*\*params) -> SnapshotListResponse +- client.nfs.snapshots.delete(nfs_snapshot_id, \*\*params) -> None diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 22f4d2e6..6734b4b6 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -32,8 +32,20 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, images, models, regions, databases, inference, gpu_droplets, knowledge_bases + from .resources import ( + nfs, + chat, + agents, + images, + models, + regions, + databases, + inference, + gpu_droplets, + knowledge_bases, + ) from .resources.images import ImagesResource, AsyncImagesResource + from .resources.nfs.nfs import NfsResource, AsyncNfsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource @@ -192,6 +204,12 @@ def databases(self) -> DatabasesResource: return DatabasesResource(self) + @cached_property + def nfs(self) -> NfsResource: + from .resources.nfs import NfsResource + + return NfsResource(self) + @cached_property def with_raw_response(self) -> GradientWithRawResponse: return GradientWithRawResponse(self) @@ -493,6 +511,12 @@ def databases(self) -> 
AsyncDatabasesResource: return AsyncDatabasesResource(self) + @cached_property + def nfs(self) -> AsyncNfsResource: + from .resources.nfs import AsyncNfsResource + + return AsyncNfsResource(self) + @cached_property def with_raw_response(self) -> AsyncGradientWithRawResponse: return AsyncGradientWithRawResponse(self) @@ -717,6 +741,12 @@ def databases(self) -> databases.DatabasesResourceWithRawResponse: return DatabasesResourceWithRawResponse(self._client.databases) + @cached_property + def nfs(self) -> nfs.NfsResourceWithRawResponse: + from .resources.nfs import NfsResourceWithRawResponse + + return NfsResourceWithRawResponse(self._client.nfs) + class AsyncGradientWithRawResponse: _client: AsyncGradient @@ -778,6 +808,12 @@ def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse: return AsyncDatabasesResourceWithRawResponse(self._client.databases) + @cached_property + def nfs(self) -> nfs.AsyncNfsResourceWithRawResponse: + from .resources.nfs import AsyncNfsResourceWithRawResponse + + return AsyncNfsResourceWithRawResponse(self._client.nfs) + class GradientWithStreamedResponse: _client: Gradient @@ -839,6 +875,12 @@ def databases(self) -> databases.DatabasesResourceWithStreamingResponse: return DatabasesResourceWithStreamingResponse(self._client.databases) + @cached_property + def nfs(self) -> nfs.NfsResourceWithStreamingResponse: + from .resources.nfs import NfsResourceWithStreamingResponse + + return NfsResourceWithStreamingResponse(self._client.nfs) + class AsyncGradientWithStreamedResponse: _client: AsyncGradient @@ -900,6 +942,12 @@ def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse: return AsyncDatabasesResourceWithStreamingResponse(self._client.databases) + @cached_property + def nfs(self) -> nfs.AsyncNfsResourceWithStreamingResponse: + from .resources.nfs import AsyncNfsResourceWithStreamingResponse + + return AsyncNfsResourceWithStreamingResponse(self._client.nfs) + Client = Gradient diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py index fdc7d346..a797b18f 100644 --- a/src/gradient/resources/__init__.py +++ b/src/gradient/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from .nfs import ( + NfsResource, + AsyncNfsResource, + NfsResourceWithRawResponse, + AsyncNfsResourceWithRawResponse, + NfsResourceWithStreamingResponse, + AsyncNfsResourceWithStreamingResponse, +) from .chat import ( ChatResource, AsyncChatResource, @@ -128,4 +136,10 @@ "AsyncDatabasesResourceWithRawResponse", "DatabasesResourceWithStreamingResponse", "AsyncDatabasesResourceWithStreamingResponse", + "NfsResource", + "AsyncNfsResource", + "NfsResourceWithRawResponse", + "AsyncNfsResourceWithRawResponse", + "NfsResourceWithStreamingResponse", + "AsyncNfsResourceWithStreamingResponse", ] diff --git a/src/gradient/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py index 31e2f93b..515a221b 100644 --- a/src/gradient/resources/agents/evaluation_metrics/__init__.py +++ b/src/gradient/resources/agents/evaluation_metrics/__init__.py @@ -48,6 +48,14 @@ EvaluationMetricsResourceWithStreamingResponse, AsyncEvaluationMetricsResourceWithStreamingResponse, ) +from .scheduled_indexing import ( + ScheduledIndexingResource, + AsyncScheduledIndexingResource, + ScheduledIndexingResourceWithRawResponse, + AsyncScheduledIndexingResourceWithRawResponse, + ScheduledIndexingResourceWithStreamingResponse, + AsyncScheduledIndexingResourceWithStreamingResponse, +) __all__ = [ "WorkspacesResource", @@ -80,6 +88,12 @@ "AsyncOauth2ResourceWithRawResponse", "Oauth2ResourceWithStreamingResponse", "AsyncOauth2ResourceWithStreamingResponse", + "ScheduledIndexingResource", + "AsyncScheduledIndexingResource", + "ScheduledIndexingResourceWithRawResponse", + "AsyncScheduledIndexingResourceWithRawResponse", + "ScheduledIndexingResourceWithStreamingResponse", + "AsyncScheduledIndexingResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py index 43c1aa9b..14ea4d55 100644 --- a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -40,6 +40,14 @@ ) from ...._base_client import make_request_options from ....types.agents import evaluation_metric_list_regions_params +from .scheduled_indexing import ( + ScheduledIndexingResource, + AsyncScheduledIndexingResource, + ScheduledIndexingResourceWithRawResponse, + AsyncScheduledIndexingResourceWithRawResponse, + ScheduledIndexingResourceWithStreamingResponse, + AsyncScheduledIndexingResourceWithStreamingResponse, +) from .anthropic.anthropic import ( AnthropicResource, AsyncAnthropicResource, @@ -83,6 +91,10 @@ def openai(self) -> OpenAIResource: def oauth2(self) -> Oauth2Resource: return Oauth2Resource(self._client) + @cached_property + def scheduled_indexing(self) -> ScheduledIndexingResource: + return ScheduledIndexingResource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -196,6 +208,10 @@ def openai(self) -> AsyncOpenAIResource: def oauth2(self) -> AsyncOauth2Resource: return AsyncOauth2Resource(self._client) + @cached_property + def scheduled_indexing(self) -> AsyncScheduledIndexingResource: + return AsyncScheduledIndexingResource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -319,6 +335,10 @@ def openai(self) -> OpenAIResourceWithRawResponse: def 
oauth2(self) -> Oauth2ResourceWithRawResponse: return Oauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2) + @cached_property + def scheduled_indexing(self) -> ScheduledIndexingResourceWithRawResponse: + return ScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -351,6 +371,10 @@ def openai(self) -> AsyncOpenAIResourceWithRawResponse: def oauth2(self) -> AsyncOauth2ResourceWithRawResponse: return AsyncOauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2) + @cached_property + def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithRawResponse: + return AsyncScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -383,6 +407,10 @@ def openai(self) -> OpenAIResourceWithStreamingResponse: def oauth2(self) -> Oauth2ResourceWithStreamingResponse: return Oauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2) + @cached_property + def scheduled_indexing(self) -> ScheduledIndexingResourceWithStreamingResponse: + return ScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -414,3 +442,7 @@ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: @cached_property def oauth2(self) -> AsyncOauth2ResourceWithStreamingResponse: return AsyncOauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2) + + @cached_property + def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithStreamingResponse: + return AsyncScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing) diff --git a/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py new file mode 100644 index 00000000..e346f7ae --- /dev/null +++ b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py @@ -0,0 +1,377 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.agents.evaluation_metrics import scheduled_indexing_create_params +from ....types.agents.evaluation_metrics.scheduled_indexing_create_response import ScheduledIndexingCreateResponse +from ....types.agents.evaluation_metrics.scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse +from ....types.agents.evaluation_metrics.scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse + +__all__ = ["ScheduledIndexingResource", "AsyncScheduledIndexingResource"] + + +class ScheduledIndexingResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ScheduledIndexingResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return ScheduledIndexingResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ScheduledIndexingResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return ScheduledIndexingResourceWithStreamingResponse(self) + + def create( + self, + *, + days: Iterable[int] | Omit = omit, + knowledge_base_uuid: str | Omit = omit, + time: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ScheduledIndexingCreateResponse: + """ + To create scheduled indexing for a knowledge base, send a POST request to + `/v2/gen-ai/scheduled-indexing`. + + Args: + days: Days for execution (day is represented same as in a cron expression, e.g. 
Monday
+              begins with 1 )
+
+          knowledge_base_uuid: Knowledge base uuid for which the schedule is created
+
+          time: Time of execution (HH:MM) UTC
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/v2/gen-ai/scheduled-indexing"
+            if self._client._base_url_overridden
+            else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing",
+            body=maybe_transform(
+                {
+                    "days": days,
+                    "knowledge_base_uuid": knowledge_base_uuid,
+                    "time": time,
+                },
+                scheduled_indexing_create_params.ScheduledIndexingCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScheduledIndexingCreateResponse,
+        )
+
+    def retrieve(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ScheduledIndexingRetrieveResponse:
+        """
+        To get the scheduled indexing for a knowledge base by its knowledge base uuid, send a GET
+        request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        return self._get(
+            f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScheduledIndexingRetrieveResponse,
+        )
+
+    def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ScheduledIndexingDeleteResponse:
+        """
+        To delete scheduled indexing for a knowledge base, send a DELETE request to
+        `/v2/gen-ai/scheduled-indexing/{uuid}`.
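For orientation, here is a minimal synchronous usage sketch of the scheduled-indexing surface added in this patch. It is illustrative only and not part of the diff; the access token, UUIDs, and schedule values are placeholders.

```python
# A hedged sketch; all tokens and IDs below are placeholders.
from gradient import Gradient

client = Gradient(access_token="my-access-token")

# Schedule indexing for Monday/Wednesday/Friday at 02:00 UTC
# (days use the cron convention noted above: Monday = 1).
schedule = client.agents.evaluation_metrics.scheduled_indexing.create(
    knowledge_base_uuid="kb-uuid",
    days=[1, 3, 5],
    time="02:00",
)

# Retrieval is keyed by the knowledge base UUID...
current = client.agents.evaluation_metrics.scheduled_indexing.retrieve("kb-uuid")

# ...while deletion takes the schedule's own UUID (placeholder here).
client.agents.evaluation_metrics.scheduled_indexing.delete("schedule-uuid")
```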
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._delete( + f"/v2/gen-ai/scheduled-indexing/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ScheduledIndexingDeleteResponse, + ) + + +class AsyncScheduledIndexingResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncScheduledIndexingResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncScheduledIndexingResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncScheduledIndexingResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncScheduledIndexingResourceWithStreamingResponse(self) + + async def create( + self, + *, + days: Iterable[int] | Omit = omit, + knowledge_base_uuid: str | Omit = omit, + time: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ScheduledIndexingCreateResponse: + """ + To create scheduled indexing for a knowledge base, send a POST request to + `/v2/gen-ai/scheduled-indexing`. + + Args: + days: Days for execution (day is represented same as in a cron expression, e.g. Monday + begins with 1 ) + + knowledge_base_uuid: Knowledge base uuid for which the schedule is created + + time: Time of execution (HH:MM) UTC + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/scheduled-indexing" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing", + body=await async_maybe_transform( + { + "days": days, + "knowledge_base_uuid": knowledge_base_uuid, + "time": time, + }, + scheduled_indexing_create_params.ScheduledIndexingCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ScheduledIndexingCreateResponse, + ) + + async def retrieve( + self, + knowledge_base_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ScheduledIndexingRetrieveResponse:
+        """
+        To get the scheduled indexing for a knowledge base by its knowledge base uuid, send a GET
+        request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        return await self._get(
+            f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ScheduledIndexingRetrieveResponse,
+        )
+
+    async def delete(
+        self,
+        uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ScheduledIndexingDeleteResponse:
+        """
+        To delete scheduled indexing for a knowledge base, send a DELETE request to
+        `/v2/gen-ai/scheduled-indexing/{uuid}`.
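The async resource mirrors the sync surface method-for-method; a hedged sketch of the same calls through `AsyncGradient`, again with placeholder values:

```python
import asyncio

from gradient import AsyncGradient


async def main() -> None:
    client = AsyncGradient(access_token="my-access-token")  # placeholder token
    # Same endpoints as the sync sketch above, awaited.
    schedule = await client.agents.evaluation_metrics.scheduled_indexing.retrieve("kb-uuid")
    print(schedule)
    await client.agents.evaluation_metrics.scheduled_indexing.delete("schedule-uuid")
    await client.close()


asyncio.run(main())
```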
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/gen-ai/scheduled-indexing/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ScheduledIndexingDeleteResponse, + ) + + +class ScheduledIndexingResourceWithRawResponse: + def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None: + self._scheduled_indexing = scheduled_indexing + + self.create = to_raw_response_wrapper( + scheduled_indexing.create, + ) + self.retrieve = to_raw_response_wrapper( + scheduled_indexing.retrieve, + ) + self.delete = to_raw_response_wrapper( + scheduled_indexing.delete, + ) + + +class AsyncScheduledIndexingResourceWithRawResponse: + def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None: + self._scheduled_indexing = scheduled_indexing + + self.create = async_to_raw_response_wrapper( + scheduled_indexing.create, + ) + self.retrieve = async_to_raw_response_wrapper( + scheduled_indexing.retrieve, + ) + self.delete = async_to_raw_response_wrapper( + scheduled_indexing.delete, + ) + + +class ScheduledIndexingResourceWithStreamingResponse: + def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None: + self._scheduled_indexing = scheduled_indexing + + self.create = to_streamed_response_wrapper( + scheduled_indexing.create, + ) + self.retrieve = to_streamed_response_wrapper( + scheduled_indexing.retrieve, + ) + self.delete = to_streamed_response_wrapper( + scheduled_indexing.delete, + ) + + +class AsyncScheduledIndexingResourceWithStreamingResponse: + def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None: + self._scheduled_indexing = scheduled_indexing + + self.create = async_to_streamed_response_wrapper( + scheduled_indexing.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + scheduled_indexing.retrieve, + ) + self.delete = async_to_streamed_response_wrapper( + scheduled_indexing.delete, + ) diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py index 95898c2a..4c222bcf 100644 --- a/src/gradient/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py @@ -24,6 +24,7 @@ from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ...types.knowledge_bases.indexing_job_retrieve_signed_url_response import IndexingJobRetrieveSignedURLResponse from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] @@ -217,6 +218,42 @@ def retrieve_data_sources( cast_to=IndexingJobRetrieveDataSourcesResponse, ) + def retrieve_signed_url( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to 
pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> IndexingJobRetrieveSignedURLResponse: + """ + To get a signed URL for indexing job details, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveSignedURLResponse, + ) + def update_cancel( self, path_uuid: str, @@ -448,6 +485,42 @@ async def retrieve_data_sources( cast_to=IndexingJobRetrieveDataSourcesResponse, ) + async def retrieve_signed_url( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> IndexingJobRetrieveSignedURLResponse: + """ + To get a signed URL for indexing job details, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`. 
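A small sketch of the new signed-URL accessor (not part of the diff; the job UUID is a placeholder, and the response is printed rather than assuming its field names):

```python
from gradient import Gradient

client = Gradient(access_token="my-access-token")  # placeholder token

# Returns an IndexingJobRetrieveSignedURLResponse carrying a signed URL
# for the job's details; field names are left to the response model.
signed = client.knowledge_bases.indexing_jobs.retrieve_signed_url("indexing-job-uuid")
print(signed)
```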
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return await self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveSignedURLResponse, + ) + async def update_cancel( self, path_uuid: str, @@ -507,6 +580,9 @@ def __init__(self, indexing_jobs: IndexingJobsResource) -> None: self.retrieve_data_sources = to_raw_response_wrapper( indexing_jobs.retrieve_data_sources, ) + self.retrieve_signed_url = to_raw_response_wrapper( + indexing_jobs.retrieve_signed_url, + ) self.update_cancel = to_raw_response_wrapper( indexing_jobs.update_cancel, ) @@ -528,6 +604,9 @@ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: self.retrieve_data_sources = async_to_raw_response_wrapper( indexing_jobs.retrieve_data_sources, ) + self.retrieve_signed_url = async_to_raw_response_wrapper( + indexing_jobs.retrieve_signed_url, + ) self.update_cancel = async_to_raw_response_wrapper( indexing_jobs.update_cancel, ) @@ -549,6 +628,9 @@ def __init__(self, indexing_jobs: IndexingJobsResource) -> None: self.retrieve_data_sources = to_streamed_response_wrapper( indexing_jobs.retrieve_data_sources, ) + self.retrieve_signed_url = to_streamed_response_wrapper( + indexing_jobs.retrieve_signed_url, + ) self.update_cancel = to_streamed_response_wrapper( indexing_jobs.update_cancel, ) @@ -570,6 +652,9 @@ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: self.retrieve_data_sources = async_to_streamed_response_wrapper( indexing_jobs.retrieve_data_sources, ) + self.retrieve_signed_url = async_to_streamed_response_wrapper( + indexing_jobs.retrieve_signed_url, + ) self.update_cancel = async_to_streamed_response_wrapper( indexing_jobs.update_cancel, ) diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py index 00fa0659..b297280f 100644 --- a/src/gradient/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py @@ -39,6 +39,7 @@ from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse +from ...types.knowledge_base_list_indexing_jobs_response import KnowledgeBaseListIndexingJobsResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] @@ -330,6 +331,44 @@ def delete( cast_to=KnowledgeBaseDeleteResponse, ) + def list_indexing_jobs( + self, + knowledge_base_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListIndexingJobsResponse:
+        """
+        To list the latest 15 indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not knowledge_base_uuid:
+            raise ValueError(
+                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+            )
+        return self._get(
+            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=KnowledgeBaseListIndexingJobsResponse,
+        )
+

 class AsyncKnowledgeBasesResource(AsyncAPIResource):
     @cached_property
@@ -618,6 +657,44 @@ async def delete(
             cast_to=KnowledgeBaseDeleteResponse,
         )

+    async def list_indexing_jobs(
+        self,
+        knowledge_base_uuid: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> KnowledgeBaseListIndexingJobsResponse:
+        """
+        To list the latest 15 indexing jobs for a knowledge base, send a GET request to
+        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
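A short sketch of the convenience listing (placeholder UUID; per the docstring, the server returns at most the latest 15 jobs):

```python
from gradient import Gradient

client = Gradient(access_token="my-access-token")  # placeholder token

# Latest indexing jobs (up to 15) for one knowledge base.
jobs = client.knowledge_bases.list_indexing_jobs("kb-uuid")
print(jobs)
```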
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseListIndexingJobsResponse, + ) + class KnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -638,6 +715,9 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.delete = to_raw_response_wrapper( knowledge_bases.delete, ) + self.list_indexing_jobs = to_raw_response_wrapper( + knowledge_bases.list_indexing_jobs, + ) @cached_property def data_sources(self) -> DataSourcesResourceWithRawResponse: @@ -667,6 +747,9 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.delete = async_to_raw_response_wrapper( knowledge_bases.delete, ) + self.list_indexing_jobs = async_to_raw_response_wrapper( + knowledge_bases.list_indexing_jobs, + ) @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: @@ -696,6 +779,9 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.delete = to_streamed_response_wrapper( knowledge_bases.delete, ) + self.list_indexing_jobs = to_streamed_response_wrapper( + knowledge_bases.list_indexing_jobs, + ) @cached_property def data_sources(self) -> DataSourcesResourceWithStreamingResponse: @@ -725,6 +811,9 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.delete = async_to_streamed_response_wrapper( knowledge_bases.delete, ) + self.list_indexing_jobs = async_to_streamed_response_wrapper( + knowledge_bases.list_indexing_jobs, + ) @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: diff --git a/src/gradient/resources/nfs/__init__.py b/src/gradient/resources/nfs/__init__.py new file mode 100644 index 00000000..28f843c0 --- /dev/null +++ b/src/gradient/resources/nfs/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .nfs import ( + NfsResource, + AsyncNfsResource, + NfsResourceWithRawResponse, + AsyncNfsResourceWithRawResponse, + NfsResourceWithStreamingResponse, + AsyncNfsResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) + +__all__ = [ + "SnapshotsResource", + "AsyncSnapshotsResource", + "SnapshotsResourceWithRawResponse", + "AsyncSnapshotsResourceWithRawResponse", + "SnapshotsResourceWithStreamingResponse", + "AsyncSnapshotsResourceWithStreamingResponse", + "NfsResource", + "AsyncNfsResource", + "NfsResourceWithRawResponse", + "AsyncNfsResourceWithRawResponse", + "NfsResourceWithStreamingResponse", + "AsyncNfsResourceWithStreamingResponse", +] diff --git a/src/gradient/resources/nfs/nfs.py b/src/gradient/resources/nfs/nfs.py new file mode 100644 index 00000000..1510bb69 --- /dev/null +++ b/src/gradient/resources/nfs/nfs.py @@ -0,0 +1,780 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, overload + +import httpx + +from ...types import nf_list_params, nf_create_params, nf_delete_params, nf_retrieve_params, nf_initiate_action_params +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.nf_list_response import NfListResponse +from ...types.nf_create_response import NfCreateResponse +from ...types.nf_retrieve_response import NfRetrieveResponse +from ...types.nf_initiate_action_response import NfInitiateActionResponse + +__all__ = ["NfsResource", "AsyncNfsResource"] + + +class NfsResource(SyncAPIResource): + @cached_property + def snapshots(self) -> SnapshotsResource: + return SnapshotsResource(self._client) + + @cached_property + def with_raw_response(self) -> NfsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return NfsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> NfsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return NfsResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str, + region: str, + size_gib: int, + vpc_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfCreateResponse: + """ + To create a new NFS share, send a POST request to `/v2/nfs`. + + Args: + name: The human-readable name of the share. + + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50. + + vpc_ids: List of VPC IDs that should be able to access the share. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs", + body=maybe_transform( + { + "name": name, + "region": region, + "size_gib": size_gib, + "vpc_ids": vpc_ids, + }, + nf_create_params.NfCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NfCreateResponse, + ) + + def retrieve( + self, + nfs_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfRetrieveResponse: + """ + To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`. + + A successful request will return the NFS share. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_id: + raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}") + return self._get( + f"/v2/nfs/{nfs_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/{nfs_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams), + ), + cast_to=NfRetrieveResponse, + ) + + def list( + self, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfListResponse: + """ + To list NFS shares, send a GET request to `/v2/nfs?region=${region}`. + + A successful request will return all NFS shares belonging to the authenticated + user. 
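A minimal sketch tying the NFS create/retrieve/list methods together (not part of the diff; the name, region, and IDs are placeholders, and note that `region` is a required query parameter on reads):

```python
from gradient import Gradient

client = Gradient(access_token="my-access-token")  # placeholder token

share = client.nfs.create(
    name="shared-scratch",   # human-readable share name
    region="atl1",           # region slug
    size_gib=50,             # 50 GiB is the documented minimum
    vpc_ids=["vpc-uuid"],    # placeholder VPC granted access
)

# Reads (and deletes) must repeat the region as a query parameter.
one = client.nfs.retrieve("nfs-id", region="atl1")
all_in_region = client.nfs.list(region="atl1")
```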
+
+        Args:
+          region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"region": region}, nf_list_params.NfListParams),
+            ),
+            cast_to=NfListResponse,
+        )
+
+    def delete(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        To delete an NFS share, send a DELETE request to
+        `/v2/nfs/{nfs_id}?region=${region}`.
+
+        A successful request will return a `204 No Content` status code.
+
+        Args:
+          region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not nfs_id:
+            raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/v2/nfs/{nfs_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+            ),
+            cast_to=NoneType,
+        )
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                  | Details                                                                      |
+        | ----------------------- | ---------------------------------------------------------------------------- |
+        | `resize`                | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB  |
+        | `snapshot`              | Takes a snapshot of an NFS share                                             |
+
+        Args:
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                  | Details                                                                      |
+        | ----------------------- | ---------------------------------------------------------------------------- |
+        | `resize`                | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB  |
+        | `snapshot`              | Takes a snapshot of an NFS share                                             |
+
+        Args:
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS snapshot resides.
+
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["region", "type"])
+    def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionResizeParams
+        | nf_initiate_action_params.NfsActionSnapshotParams
+        | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        if not nfs_id:
+            raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+        return self._post(
+            f"/v2/nfs/{nfs_id}/actions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+            body=maybe_transform(
+                {
+                    "region": region,
+                    "type": type,
+                    "params": params,
+                },
+                nf_initiate_action_params.NfInitiateActionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NfInitiateActionResponse,
+        )
+
+
+class AsyncNfsResource(AsyncAPIResource):
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResource:
+        return AsyncSnapshotsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncNfsResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncNfsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncNfsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncNfsResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str, + region: str, + size_gib: int, + vpc_ids: SequenceNotStr[str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfCreateResponse: + """ + To create a new NFS share, send a POST request to `/v2/nfs`. + + Args: + name: The human-readable name of the share. + + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50. + + vpc_ids: List of VPC IDs that should be able to access the share. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs", + body=await async_maybe_transform( + { + "name": name, + "region": region, + "size_gib": size_gib, + "vpc_ids": vpc_ids, + }, + nf_create_params.NfCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NfCreateResponse, + ) + + async def retrieve( + self, + nfs_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfRetrieveResponse: + """ + To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`. + + A successful request will return the NFS share. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_id: + raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}") + return await self._get( + f"/v2/nfs/{nfs_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/{nfs_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams), + ), + cast_to=NfRetrieveResponse, + ) + + async def list( + self, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> NfListResponse: + """ + To list NFS shares, send a GET request to `/v2/nfs?region=${region}`. + + A successful request will return all NFS shares belonging to the authenticated + user. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"region": region}, nf_list_params.NfListParams), + ), + cast_to=NfListResponse, + ) + + async def delete( + self, + nfs_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + To delete an NFS share, send a DELETE request to + `/v2/nfs/{nfs_id}?region=${region}`. + + A successful request will return a `204 No Content` status code. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. 
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not nfs_id:
+            raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/nfs/{nfs_id}"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+            ),
+            cast_to=NoneType,
+        )
+
+    @overload
+    async def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                  | Details                                                                      |
+        | ----------------------- | ---------------------------------------------------------------------------- |
+        | `resize`                | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB   |
+        | `snapshot`              | Takes a snapshot of an NFS share                                             |
+
+        Args:
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides.
+
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        """
+        To execute an action (such as resize) on a specified NFS share, send a POST
+        request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+        `type` attribute to one of the supported action types:
+
+        | Action                  | Details                                                                      |
+        | ----------------------- | ---------------------------------------------------------------------------- |
+        | `resize`                | Resizes an NFS share. Set the size_gib attribute to a desired value in GiB   |
+        | `snapshot`              | Takes a snapshot of an NFS share                                             |
+
+        Args:
+          region: The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides.
+
+          type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["region", "type"])
+    async def initiate_action(
+        self,
+        nfs_id: str,
+        *,
+        region: str,
+        type: Literal["resize", "snapshot"],
+        params: nf_initiate_action_params.NfsActionResizeParams
+        | nf_initiate_action_params.NfsActionSnapshotParams
+        | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> NfInitiateActionResponse:
+        if not nfs_id:
+            raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+        return await self._post(
+            f"/v2/nfs/{nfs_id}/actions"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+            body=await async_maybe_transform(
+                {
+                    "region": region,
+                    "type": type,
+                    "params": params,
+                },
+                nf_initiate_action_params.NfInitiateActionParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NfInitiateActionResponse,
+        )
+
+
+class NfsResourceWithRawResponse:
+    def __init__(self, nfs: NfsResource) -> None:
+        self._nfs = nfs
+
+        self.create = to_raw_response_wrapper(
+            nfs.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            nfs.retrieve,
+        )
+        self.list = to_raw_response_wrapper(
+            nfs.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            nfs.delete,
+        )
+        self.initiate_action = to_raw_response_wrapper(
+            nfs.initiate_action,
+        )
+
+    @cached_property
+    def snapshots(self) -> SnapshotsResourceWithRawResponse:
+        return SnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class AsyncNfsResourceWithRawResponse:
+    def __init__(self, nfs: AsyncNfsResource) -> None:
+        self._nfs = nfs
+
+        self.create = async_to_raw_response_wrapper(
+            nfs.create,
+        )
+        self.retrieve = async_to_raw_response_wrapper(
+            nfs.retrieve,
+        )
+        self.list = async_to_raw_response_wrapper(
+            nfs.list,
+        )
+        self.delete = async_to_raw_response_wrapper(
+            nfs.delete,
+        )
+        self.initiate_action = async_to_raw_response_wrapper(
+            nfs.initiate_action,
+        )
+
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse:
+        return AsyncSnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class NfsResourceWithStreamingResponse:
+    def __init__(self, nfs: NfsResource) -> None:
+        self._nfs = nfs
+
+        self.create = to_streamed_response_wrapper(
+            nfs.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            nfs.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            nfs.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            nfs.delete,
+        )
+        self.initiate_action = to_streamed_response_wrapper(
+            nfs.initiate_action,
+        )
+
+    @cached_property
+    def snapshots(self) ->
SnapshotsResourceWithStreamingResponse: + return SnapshotsResourceWithStreamingResponse(self._nfs.snapshots) + + +class AsyncNfsResourceWithStreamingResponse: + def __init__(self, nfs: AsyncNfsResource) -> None: + self._nfs = nfs + + self.create = async_to_streamed_response_wrapper( + nfs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + nfs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + nfs.list, + ) + self.delete = async_to_streamed_response_wrapper( + nfs.delete, + ) + self.initiate_action = async_to_streamed_response_wrapper( + nfs.initiate_action, + ) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: + return AsyncSnapshotsResourceWithStreamingResponse(self._nfs.snapshots) diff --git a/src/gradient/resources/nfs/snapshots.py b/src/gradient/resources/nfs/snapshots.py new file mode 100644 index 00000000..65b56e03 --- /dev/null +++ b/src/gradient/resources/nfs/snapshots.py @@ -0,0 +1,418 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.nfs import snapshot_list_params, snapshot_delete_params, snapshot_retrieve_params +from ..._base_client import make_request_options +from ...types.nfs.snapshot_list_response import SnapshotListResponse +from ...types.nfs.snapshot_retrieve_response import SnapshotRetrieveResponse + +__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] + + +class SnapshotsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return SnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return SnapshotsResourceWithStreamingResponse(self) + + def retrieve( + self, + nfs_snapshot_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SnapshotRetrieveResponse: + """ + To get an NFS snapshot, send a GET request to + `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`. + + A successful request will return the NFS snapshot. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_snapshot_id: + raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}") + return self._get( + f"/v2/nfs/snapshots/{nfs_snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams), + ), + cast_to=SnapshotRetrieveResponse, + ) + + def list( + self, + *, + region: str, + share_id: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SnapshotListResponse: + """ + To list all NFS snapshots, send a GET request to + `/v2/nfs/snapshots?region=${region}&share_id={share_id}`. + + A successful request will return all NFS snapshots belonging to the + authenticated user in the specified region. + + Optionally, you can filter snapshots by a specific NFS share by including the + `share_id` query parameter. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + share_id: The unique ID of an NFS share. If provided, only snapshots of this specific + share will be returned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/nfs/snapshots" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/nfs/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "region": region, + "share_id": share_id, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + def delete( + self, + nfs_snapshot_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + To delete an NFS snapshot, send a DELETE request to + `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`. + + A successful request will return a `204 No Content` status code. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_snapshot_id: + raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/nfs/snapshots/{nfs_snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams), + ), + cast_to=NoneType, + ) + + +class AsyncSnapshotsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncSnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncSnapshotsResourceWithStreamingResponse(self) + + async def retrieve( + self, + nfs_snapshot_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SnapshotRetrieveResponse: + """ + To get an NFS snapshot, send a GET request to + `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`. + + A successful request will return the NFS snapshot. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_snapshot_id: + raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}") + return await self._get( + f"/v2/nfs/snapshots/{nfs_snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams), + ), + cast_to=SnapshotRetrieveResponse, + ) + + async def list( + self, + *, + region: str, + share_id: str | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SnapshotListResponse: + """ + To list all NFS snapshots, send a GET request to + `/v2/nfs/snapshots?region=${region}&share_id={share_id}`. + + A successful request will return all NFS snapshots belonging to the + authenticated user in the specified region. + + Optionally, you can filter snapshots by a specific NFS share by including the + `share_id` query parameter. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. + + share_id: The unique ID of an NFS share. If provided, only snapshots of this specific + share will be returned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/nfs/snapshots" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/nfs/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "region": region, + "share_id": share_id, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + async def delete( + self, + nfs_snapshot_id: str, + *, + region: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> None: + """ + To delete an NFS snapshot, send a DELETE request to + `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`. + + A successful request will return a `204 No Content` status code. + + Args: + region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not nfs_snapshot_id: + raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/nfs/snapshots/{nfs_snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams), + ), + cast_to=NoneType, + ) + + +class SnapshotsResourceWithRawResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = to_raw_response_wrapper( + snapshots.list, + ) + self.delete = to_raw_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithRawResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_raw_response_wrapper( + snapshots.list, + ) + self.delete = async_to_raw_response_wrapper( + snapshots.delete, + ) + + +class SnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = to_streamed_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = async_to_streamed_response_wrapper( + snapshots.delete, + ) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index a7edfacb..4cd87a44 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -50,17 +50,24 @@ from .api_model import APIModel as APIModel from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace +from .nf_list_params import NfListParams as NfListParams from .api_agent_model import APIAgentModel as APIAgentModel +from .nf_create_params import NfCreateParams as NfCreateParams +from .nf_delete_params import NfDeleteParams as NfDeleteParams +from .nf_list_response import NfListResponse as NfListResponse from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .nf_create_response import NfCreateResponse as NfCreateResponse +from .nf_retrieve_params import NfRetrieveParams as NfRetrieveParams from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams from 
.agent_list_response import AgentListResponse as AgentListResponse from .agent_update_params import AgentUpdateParams as AgentUpdateParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .nf_retrieve_response import NfRetrieveResponse as NfRetrieveResponse from .region_list_response import RegionListResponse as RegionListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse @@ -75,12 +82,14 @@ from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse +from .nf_initiate_action_params import NfInitiateActionParams as NfInitiateActionParams from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams from .agent_retrieve_usage_params import AgentRetrieveUsageParams as AgentRetrieveUsageParams from .droplet_backup_policy_param import DropletBackupPolicyParam as DropletBackupPolicyParam from .gpu_droplet_create_response import GPUDropletCreateResponse as GPUDropletCreateResponse +from .nf_initiate_action_response import NfInitiateActionResponse as NfInitiateActionResponse from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse @@ -99,6 +108,9 @@ from .gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse from .gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse as GPUDropletListNeighborsResponse from .gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse as GPUDropletListSnapshotsResponse +from .knowledge_base_list_indexing_jobs_response import ( + KnowledgeBaseListIndexingJobsResponse as KnowledgeBaseListIndexingJobsResponse, +) # Rebuild cyclical models only after all modules are imported. 
 # This ensures that, when building the deferred (due to cyclical references) model schema,
diff --git a/src/gradient/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py
index 95d81dd2..135d4ac9 100644
--- a/src/gradient/types/agents/evaluation_metrics/__init__.py
+++ b/src/gradient/types/agents/evaluation_metrics/__init__.py
@@ -13,6 +13,10 @@
 from .oauth2_generate_url_params import Oauth2GenerateURLParams as Oauth2GenerateURLParams
 from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse
 from .oauth2_generate_url_response import Oauth2GenerateURLResponse as Oauth2GenerateURLResponse
+from .scheduled_indexing_create_params import ScheduledIndexingCreateParams as ScheduledIndexingCreateParams
+from .scheduled_indexing_create_response import ScheduledIndexingCreateResponse as ScheduledIndexingCreateResponse
+from .scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse as ScheduledIndexingDeleteResponse
+from .scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse as ScheduledIndexingRetrieveResponse
 from .workspace_list_evaluation_test_cases_response import (
     WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse,
 )
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
new file mode 100644
index 00000000..209766b4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["ScheduledIndexingCreateParams"]
+
+
+class ScheduledIndexingCreateParams(TypedDict, total=False):
+    days: Iterable[int]
+    """Days for execution (days are numbered as in a cron expression, e.g. Monday is 1)"""
+
+    knowledge_base_uuid: str
+    """Knowledge base uuid for which the schedule is created"""
+
+    time: str
+    """Time of execution (HH:MM) in UTC"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
new file mode 100644
index 00000000..c306c5b1
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingCreateResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+    created_at: Optional[datetime] = None
+    """Created at timestamp"""
+
+    days: Optional[List[int]] = None
+    """Days for execution (days are numbered as in a cron expression, e.g. Monday is 1)"""
+
+    deleted_at: Optional[datetime] = None
+    """Deleted at timestamp (if soft deleted)"""
+
+    is_active: Optional[bool] = None
+    """Whether the schedule is currently active"""
+
+    knowledge_base_uuid: Optional[str] = None
+    """Knowledge base uuid associated with this schedule"""
+
+    last_ran_at: Optional[datetime] = None
+    """Last time the schedule was executed"""
+
+    next_run_at: Optional[datetime] = None
+    """Next scheduled run"""
+
+    time: Optional[str] = None
+    """Scheduled time of execution (HH:MM:SS format)"""
+
+    updated_at: Optional[datetime] = None
+    """Updated at timestamp"""
+
+    uuid: Optional[str] = None
+    """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingCreateResponse(BaseModel):
+    indexing_info: Optional[IndexingInfo] = None
+    """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
new file mode 100644
index 00000000..febf3759
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingDeleteResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+    created_at: Optional[datetime] = None
+    """Created at timestamp"""
+
+    days: Optional[List[int]] = None
+    """Days for execution (days are numbered as in a cron expression, e.g. Monday is 1)"""
+
+    deleted_at: Optional[datetime] = None
+    """Deleted at timestamp (if soft deleted)"""
+
+    is_active: Optional[bool] = None
+    """Whether the schedule is currently active"""
+
+    knowledge_base_uuid: Optional[str] = None
+    """Knowledge base uuid associated with this schedule"""
+
+    last_ran_at: Optional[datetime] = None
+    """Last time the schedule was executed"""
+
+    next_run_at: Optional[datetime] = None
+    """Next scheduled run"""
+
+    time: Optional[str] = None
+    """Scheduled time of execution (HH:MM:SS format)"""
+
+    updated_at: Optional[datetime] = None
+    """Updated at timestamp"""
+
+    uuid: Optional[str] = None
+    """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingDeleteResponse(BaseModel):
+    indexing_info: Optional[IndexingInfo] = None
+    """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
new file mode 100644
index 00000000..1776c83d
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingRetrieveResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+    created_at: Optional[datetime] = None
+    """Created at timestamp"""
+
+    days: Optional[List[int]] = None
+    """Days for execution (days are numbered as in a cron expression, e.g. Monday is 1)"""
+
+    deleted_at: Optional[datetime] = None
+    """Deleted at timestamp (if soft deleted)"""
+
+    is_active: Optional[bool] = None
+    """Whether the schedule is currently active"""
+
+    knowledge_base_uuid: Optional[str] = None
+    """Knowledge base uuid associated with this schedule"""
+
+    last_ran_at: Optional[datetime] = None
+    """Last time the schedule was executed"""
+
+    next_run_at: Optional[datetime] = None
+    """Next scheduled run"""
+
+    time: Optional[str] = None
+    """Scheduled time of execution (HH:MM:SS format)"""
+
+    updated_at: Optional[datetime] = None
+    """Updated at timestamp"""
+
+    uuid: Optional[str] = None
+    """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingRetrieveResponse(BaseModel):
+    indexing_info: Optional[IndexingInfo] = None
+    """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/knowledge_base_list_indexing_jobs_response.py b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
new file mode 100644
index 00000000..d88f83fc
--- /dev/null
+++ b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+from .knowledge_bases.api_indexing_job import APIIndexingJob
+
+__all__ = ["KnowledgeBaseListIndexingJobsResponse"]
+
+
+class KnowledgeBaseListIndexingJobsResponse(BaseModel):
+    jobs: Optional[List[APIIndexingJob]] = None
+    """The indexing jobs"""
+
+    links: Optional[APILinks] = None
+    """Links to other pages"""
+
+    meta: Optional[APIMeta] = None
+    """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
index cab865fa..a8ce2cc7 100644
--- a/src/gradient/types/knowledge_bases/__init__.py
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -27,6 +27,9 @@
 from .data_source_create_presigned_urls_params import (
     DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
 )
+from .indexing_job_retrieve_signed_url_response import (
+    IndexingJobRetrieveSignedURLResponse as IndexingJobRetrieveSignedURLResponse,
+)
 from .data_source_create_presigned_urls_response import (
     DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
 )
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
new file mode 100644
index 00000000..2ef60e45
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["IndexingJobRetrieveSignedURLResponse"]
+
+
+class IndexingJobRetrieveSignedURLResponse(BaseModel):
+    signed_url: Optional[str] = None
+    """The signed url for downloading the indexing job details"""
diff --git a/src/gradient/types/nf_create_params.py b/src/gradient/types/nf_create_params.py
new file mode 100644
index 00000000..327beb2e
--- /dev/null
+++ b/src/gradient/types/nf_create_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
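+
+# The keys below map one-to-one onto the `create` keyword arguments. A minimal
+# usage sketch, assuming the resource is exposed as `client.nfs` (the accessor
+# name and all values here are illustrative placeholders):
+#
+#     from gradient import Gradient
+#
+#     client = Gradient()
+#     created = client.nfs.create(
+#         name="my-share",
+#         region="atl1",
+#         size_gib=50,  # the API requires at least 50 GiB
+#         vpc_ids=["123e4567-e89b-12d3-a456-426614174000"],
+#     )
+#     print(created.share.id if created.share else None)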
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["NfCreateParams"]
+
+
+class NfCreateParams(TypedDict, total=False):
+    name: Required[str]
+    """The human-readable name of the share."""
+
+    region: Required[str]
+    """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+    size_gib: Required[int]
+    """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+    vpc_ids: Required[SequenceNotStr[str]]
+    """List of VPC IDs that should be able to access the share."""
diff --git a/src/gradient/types/nf_create_response.py b/src/gradient/types/nf_create_response.py
new file mode 100644
index 00000000..5016d776
--- /dev/null
+++ b/src/gradient/types/nf_create_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfCreateResponse", "Share"]
+
+
+class Share(BaseModel):
+    id: str
+    """The unique identifier of the NFS share."""
+
+    created_at: datetime
+    """Timestamp for when the NFS share was created."""
+
+    name: str
+    """The human-readable name of the share."""
+
+    region: str
+    """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+    size_gib: int
+    """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+    status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+    """The current status of the share."""
+
+    host: Optional[str] = None
+    """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+    mount_path: Optional[str] = None
+    """
+    Path at which the share will be available, to be mounted at a target of the
+    user's choice within the client
+    """
+
+    vpc_ids: Optional[List[str]] = None
+    """List of VPC IDs that should be able to access the share."""
+
+
+class NfCreateResponse(BaseModel):
+    share: Optional[Share] = None
diff --git a/src/gradient/types/nf_delete_params.py b/src/gradient/types/nf_delete_params.py
new file mode 100644
index 00000000..a11474e5
--- /dev/null
+++ b/src/gradient/types/nf_delete_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["NfDeleteParams"]
+
+
+class NfDeleteParams(TypedDict, total=False):
+    region: Required[str]
+    """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_initiate_action_params.py b/src/gradient/types/nf_initiate_action_params.py
new file mode 100644
index 00000000..a187f56d
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "NfInitiateActionParams",
+    "NfsActionResize",
+    "NfsActionResizeParams",
+    "NfsActionSnapshot",
+    "NfsActionSnapshotParams",
+]
+
+
+class NfsActionResize(TypedDict, total=False):
+    region: Required[str]
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides."""
+
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionResizeParams
+
+
+class NfsActionResizeParams(TypedDict, total=False):
+    size_gib: Required[int]
+    """The new size for the NFS share."""
+
+
+class NfsActionSnapshot(TypedDict, total=False):
+    region: Required[str]
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides."""
+
+    type: Required[Literal["resize", "snapshot"]]
+    """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+    params: NfsActionSnapshotParams
+
+
+class NfsActionSnapshotParams(TypedDict, total=False):
+    name: Required[str]
+    """Snapshot name of the NFS share"""
+
+
+NfInitiateActionParams: TypeAlias = Union[NfsActionResize, NfsActionSnapshot]
diff --git a/src/gradient/types/nf_initiate_action_response.py b/src/gradient/types/nf_initiate_action_response.py
new file mode 100644
index 00000000..9f38a4b2
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_response.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfInitiateActionResponse", "Action"]
+
+
+class Action(BaseModel):
+    region_slug: str
+    """The DigitalOcean region slug where the resource is located."""
+
+    resource_id: str
+    """The unique identifier of the resource on which the action is being performed."""
+
+    resource_type: Literal["network_file_share", "network_file_share_snapshot"]
+    """The type of resource on which the action is being performed."""
+
+    started_at: datetime
+    """The timestamp when the action was started."""
+
+    status: Literal["in-progress", "completed", "errored"]
+    """The current status of the action."""
+
+    type: str
+    """The type of action being performed."""
+
+
+class NfInitiateActionResponse(BaseModel):
+    action: Action
+    """The action that was submitted."""
diff --git a/src/gradient/types/nf_list_params.py b/src/gradient/types/nf_list_params.py
new file mode 100644
index 00000000..bc53c284
--- /dev/null
+++ b/src/gradient/types/nf_list_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["NfListParams"]
+
+
+class NfListParams(TypedDict, total=False):
+    region: Required[str]
+    """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_list_response.py b/src/gradient/types/nf_list_response.py
new file mode 100644
index 00000000..c5af118b
--- /dev/null
+++ b/src/gradient/types/nf_list_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
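+
+# A short usage sketch for the list call that returns this model, assuming the
+# resource is exposed as `client.nfs` (accessor name and region value are
+# illustrative placeholders):
+#
+#     from gradient import Gradient
+#
+#     client = Gradient()
+#     listed = client.nfs.list(region="atl1")
+#     for share in listed.shares or []:
+#         print(share.id, share.status, share.size_gib)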
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["NfListResponse", "Share"] + + +class Share(BaseModel): + id: str + """The unique identifier of the NFS share.""" + + created_at: datetime + """Timestamp for when the NFS share was created.""" + + name: str + """The human-readable name of the share.""" + + region: str + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" + + size_gib: int + """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.""" + + status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"] + """The current status of the share.""" + + host: Optional[str] = None + """The host IP of the NFS server that will be accessible from the associated VPC""" + + mount_path: Optional[str] = None + """ + Path at which the share will be available, to be mounted at a target of the + user's choice within the client + """ + + vpc_ids: Optional[List[str]] = None + """List of VPC IDs that should be able to access the share.""" + + +class NfListResponse(BaseModel): + shares: Optional[List[Share]] = None diff --git a/src/gradient/types/nf_retrieve_params.py b/src/gradient/types/nf_retrieve_params.py new file mode 100644 index 00000000..292053d9 --- /dev/null +++ b/src/gradient/types/nf_retrieve_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["NfRetrieveParams"] + + +class NfRetrieveParams(TypedDict, total=False): + region: Required[str] + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" diff --git a/src/gradient/types/nf_retrieve_response.py b/src/gradient/types/nf_retrieve_response.py new file mode 100644 index 00000000..897f07f0 --- /dev/null +++ b/src/gradient/types/nf_retrieve_response.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["NfRetrieveResponse", "Share"] + + +class Share(BaseModel): + id: str + """The unique identifier of the NFS share.""" + + created_at: datetime + """Timestamp for when the NFS share was created.""" + + name: str + """The human-readable name of the share.""" + + region: str + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" + + size_gib: int + """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.""" + + status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"] + """The current status of the share.""" + + host: Optional[str] = None + """The host IP of the NFS server that will be accessible from the associated VPC""" + + mount_path: Optional[str] = None + """ + Path at which the share will be available, to be mounted at a target of the + user's choice within the client + """ + + vpc_ids: Optional[List[str]] = None + """List of VPC IDs that should be able to access the share.""" + + +class NfRetrieveResponse(BaseModel): + share: Optional[Share] = None diff --git a/src/gradient/types/nfs/__init__.py b/src/gradient/types/nfs/__init__.py new file mode 100644 index 00000000..41777980 --- /dev/null +++ b/src/gradient/types/nfs/__init__.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
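+
+# These re-exports back the snapshot sub-resource (`client.nfs.snapshots`).
+# Snapshots are created through the share action endpoint; a hedged sketch,
+# with placeholder values:
+#
+#     from gradient import Gradient
+#
+#     client = Gradient()
+#     action = client.nfs.initiate_action(
+#         "123e4567-e89b-12d3-a456-426614174000",  # nfs_id (placeholder)
+#         region="atl1",
+#         type="snapshot",
+#         params={"name": "nightly-backup"},  # NfsActionSnapshotParams
+#     )
+#     print(action.action.status)  # "in-progress", "completed", or "errored"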
+ +from __future__ import annotations + +from .snapshot_list_params import SnapshotListParams as SnapshotListParams +from .snapshot_delete_params import SnapshotDeleteParams as SnapshotDeleteParams +from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse +from .snapshot_retrieve_params import SnapshotRetrieveParams as SnapshotRetrieveParams +from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse diff --git a/src/gradient/types/nfs/snapshot_delete_params.py b/src/gradient/types/nfs/snapshot_delete_params.py new file mode 100644 index 00000000..1b26149e --- /dev/null +++ b/src/gradient/types/nfs/snapshot_delete_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["SnapshotDeleteParams"] + + +class SnapshotDeleteParams(TypedDict, total=False): + region: Required[str] + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" diff --git a/src/gradient/types/nfs/snapshot_list_params.py b/src/gradient/types/nfs/snapshot_list_params.py new file mode 100644 index 00000000..8c4c6946 --- /dev/null +++ b/src/gradient/types/nfs/snapshot_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["SnapshotListParams"] + + +class SnapshotListParams(TypedDict, total=False): + region: Required[str] + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" + + share_id: str + """The unique ID of an NFS share. + + If provided, only snapshots of this specific share will be returned. + """ diff --git a/src/gradient/types/nfs/snapshot_list_response.py b/src/gradient/types/nfs/snapshot_list_response.py new file mode 100644 index 00000000..8a6864dc --- /dev/null +++ b/src/gradient/types/nfs/snapshot_list_response.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["SnapshotListResponse", "Snapshot"] + + +class Snapshot(BaseModel): + id: str + """The unique identifier of the snapshot.""" + + created_at: datetime + """The timestamp when the snapshot was created.""" + + name: str + """The human-readable name of the snapshot.""" + + region: str + """The DigitalOcean region slug where the snapshot is located.""" + + share_id: str + """The unique identifier of the share from which this snapshot was created.""" + + size_gib: int + """The size of the snapshot in GiB.""" + + status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"] + """The current status of the snapshot.""" + + +class SnapshotListResponse(BaseModel): + snapshots: Optional[List[Snapshot]] = None diff --git a/src/gradient/types/nfs/snapshot_retrieve_params.py b/src/gradient/types/nfs/snapshot_retrieve_params.py new file mode 100644 index 00000000..d1e1f8e8 --- /dev/null +++ b/src/gradient/types/nfs/snapshot_retrieve_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
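+
+# A sketch of listing snapshots filtered to a single share and then retrieving
+# one of them, assuming the sub-resource is exposed as `client.nfs.snapshots`
+# (values are placeholders):
+#
+#     from gradient import Gradient
+#
+#     client = Gradient()
+#     snaps = client.nfs.snapshots.list(
+#         region="atl1",
+#         share_id="123e4567-e89b-12d3-a456-426614174000",
+#     )
+#     for snap in snaps.snapshots or []:
+#         detail = client.nfs.snapshots.retrieve(snap.id, region="atl1")
+#         print(detail.snapshot.name if detail.snapshot else None)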
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["SnapshotRetrieveParams"] + + +class SnapshotRetrieveParams(TypedDict, total=False): + region: Required[str] + """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.""" diff --git a/src/gradient/types/nfs/snapshot_retrieve_response.py b/src/gradient/types/nfs/snapshot_retrieve_response.py new file mode 100644 index 00000000..2d54d523 --- /dev/null +++ b/src/gradient/types/nfs/snapshot_retrieve_response.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["SnapshotRetrieveResponse", "Snapshot"] + + +class Snapshot(BaseModel): + id: str + """The unique identifier of the snapshot.""" + + created_at: datetime + """The timestamp when the snapshot was created.""" + + name: str + """The human-readable name of the snapshot.""" + + region: str + """The DigitalOcean region slug where the snapshot is located.""" + + share_id: str + """The unique identifier of the share from which this snapshot was created.""" + + size_gib: int + """The size of the snapshot in GiB.""" + + status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"] + """The current status of the snapshot.""" + + +class SnapshotRetrieveResponse(BaseModel): + snapshot: Optional[Snapshot] = None + """Represents an NFS snapshot.""" diff --git a/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py new file mode 100644 index 00000000..388e06c9 --- /dev/null +++ b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py @@ -0,0 +1,274 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
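+
+# The surface exercised below: a schedule is created for a knowledge base with
+# cron-style day numbers and an HH:MM UTC time. A direct-call sketch with
+# placeholder values:
+#
+#     from gradient import Gradient
+#
+#     client = Gradient()
+#     schedule = client.agents.evaluation_metrics.scheduled_indexing.create(
+#         knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+#         days=[1, 3, 5],  # Monday, Wednesday, Friday
+#         time="02:00",
+#     )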
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types.agents.evaluation_metrics import ( + ScheduledIndexingCreateResponse, + ScheduledIndexingDeleteResponse, + ScheduledIndexingRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestScheduledIndexing: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: Gradient) -> None: + scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params(self, client: Gradient) -> None: + scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create( + days=[123], + knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000", + time="example string", + ) + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: Gradient) -> None: + response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: Gradient) -> None: + with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve(self, client: Gradient) -> None: + scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_retrieve(self, client: Gradient) -> None: + response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_retrieve(self, client: Gradient) -> None: + with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_retrieve(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: Gradient) -> None: + scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.delete( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: Gradient) -> None: + response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: Gradient) -> None: + with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = response.parse() + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete( + "", + ) + + +class TestAsyncScheduledIndexing: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create(self, async_client: AsyncGradient) -> None: + scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None: + scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create( + days=[123], + knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000", + time="example string", + ) + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradient) -> None: + response = await 
async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradient) -> None: + async with ( + async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create() + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: + scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: + response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: + async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncGradient) -> None: + scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.delete( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: + response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete( + '"123e4567-e89b-12d3-a456-426614174000"', 
+ ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: + async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + scheduled_indexing = await response.parse() + assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 3dffaa69..aab5d9ac 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -14,6 +14,7 @@ IndexingJobCreateResponse, IndexingJobRetrieveResponse, IndexingJobUpdateCancelResponse, + IndexingJobRetrieveSignedURLResponse, IndexingJobRetrieveDataSourcesResponse, ) @@ -181,6 +182,48 @@ def test_path_params_retrieve_data_sources(self, client: Gradient) -> None: "", ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve_signed_url(self, client: Gradient) -> None: + indexing_job = client.knowledge_bases.indexing_jobs.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_retrieve_signed_url(self, client: Gradient) -> None: + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_retrieve_signed_url(self, client: Gradient) -> None: + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_retrieve_signed_url(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + 
client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url( + "", + ) + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize def test_method_update_cancel(self, client: Gradient) -> None: @@ -396,6 +439,48 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie "", ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_retrieve_signed_url(self, async_client: AsyncGradient) -> None: + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None: + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_retrieve_signed_url(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url( + "", + ) + @pytest.mark.skip(reason="Prism tests are disabled") @parametrize async def test_method_update_cancel(self, async_client: AsyncGradient) -> None: diff --git a/tests/api_resources/nfs/__init__.py b/tests/api_resources/nfs/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/nfs/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/nfs/test_snapshots.py b/tests/api_resources/nfs/test_snapshots.py new file mode 100644 index 00000000..e17265f3 --- /dev/null +++ b/tests/api_resources/nfs/test_snapshots.py @@ -0,0 +1,297 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
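+#
+# A hedged sketch of the snapshot flow exercised below; client construction and
+# the field name on the list response are assumptions, not confirmed here:
+#
+#     client = Gradient()
+#     listed = client.nfs.snapshots.list(region="atl1", share_id="<share-uuid>")
+#     for snap in listed.snapshots or []:  # response field name assumed
+#         print(snap.id, snap.status, snap.size_gib)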
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types.nfs import ( + SnapshotListResponse, + SnapshotRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSnapshots: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve(self, client: Gradient) -> None: + snapshot = client.nfs.snapshots.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_retrieve(self, client: Gradient) -> None: + response = client.nfs.snapshots.with_raw_response.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_retrieve(self, client: Gradient) -> None: + with client.nfs.snapshots.with_streaming_response.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_retrieve(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"): + client.nfs.snapshots.with_raw_response.retrieve( + nfs_snapshot_id="", + region="region", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: Gradient) -> None: + snapshot = client.nfs.snapshots.list( + region="region", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list_with_all_params(self, client: Gradient) -> None: + snapshot = client.nfs.snapshots.list( + region="region", + share_id="share_id", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: Gradient) -> None: + response = client.nfs.snapshots.with_raw_response.list( + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: Gradient) -> None: + with client.nfs.snapshots.with_streaming_response.list( + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") 
== "python" + + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: Gradient) -> None: + snapshot = client.nfs.snapshots.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert snapshot is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: Gradient) -> None: + response = client.nfs.snapshots.with_raw_response.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert snapshot is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: Gradient) -> None: + with client.nfs.snapshots.with_streaming_response.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"): + client.nfs.snapshots.with_raw_response.delete( + nfs_snapshot_id="", + region="region", + ) + + +class TestAsyncSnapshots: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradient) -> None: + snapshot = await async_client.nfs.snapshots.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None: + response = await async_client.nfs.snapshots.with_raw_response.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None: + async with async_client.nfs.snapshots.with_streaming_response.retrieve( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_retrieve(self, async_client: 
AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"): + await async_client.nfs.snapshots.with_raw_response.retrieve( + nfs_snapshot_id="", + region="region", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list(self, async_client: AsyncGradient) -> None: + snapshot = await async_client.nfs.snapshots.list( + region="region", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None: + snapshot = await async_client.nfs.snapshots.list( + region="region", + share_id="share_id", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradient) -> None: + response = await async_client.nfs.snapshots.with_raw_response.list( + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradient) -> None: + async with async_client.nfs.snapshots.with_streaming_response.list( + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_delete(self, async_client: AsyncGradient) -> None: + snapshot = await async_client.nfs.snapshots.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert snapshot is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradient) -> None: + response = await async_client.nfs.snapshots.with_raw_response.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert snapshot is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None: + async with async_client.nfs.snapshots.with_streaming_response.delete( + nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"): + await async_client.nfs.snapshots.with_raw_response.delete( + nfs_snapshot_id="", + 
region="region", + ) diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 3e02f057..55ffdc93 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -15,6 +15,7 @@ KnowledgeBaseDeleteResponse, KnowledgeBaseUpdateResponse, KnowledgeBaseRetrieveResponse, + KnowledgeBaseListIndexingJobsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -280,6 +281,48 @@ def test_path_params_delete(self, client: Gradient) -> None: "", ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list_indexing_jobs(self, client: Gradient) -> None: + knowledge_base = client.knowledge_bases.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list_indexing_jobs(self, client: Gradient) -> None: + response = client.knowledge_bases.with_raw_response.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list_indexing_jobs(self, client: Gradient) -> None: + with client.knowledge_bases.with_streaming_response.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_list_indexing_jobs(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.knowledge_bases.with_raw_response.list_indexing_jobs( + "", + ) + class TestAsyncKnowledgeBases: parametrize = pytest.mark.parametrize( @@ -542,3 +585,45 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: await async_client.knowledge_bases.with_raw_response.delete( "", ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_list_indexing_jobs(self, async_client: AsyncGradient) -> None: + knowledge_base = await async_client.knowledge_bases.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None: + response = await async_client.knowledge_bases.with_raw_response.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are 
disabled") + @parametrize + async def test_streaming_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None: + async with async_client.knowledge_bases.with_streaming_response.list_indexing_jobs( + '"123e4567-e89b-12d3-a456-426614174000"', + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_path_params_list_indexing_jobs(self, async_client: AsyncGradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.list_indexing_jobs( + "", + ) diff --git a/tests/api_resources/test_nfs.py b/tests/api_resources/test_nfs.py new file mode 100644 index 00000000..f2749330 --- /dev/null +++ b/tests/api_resources/test_nfs.py @@ -0,0 +1,611 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradient import Gradient, AsyncGradient +from tests.utils import assert_matches_type +from gradient.types import ( + NfListResponse, + NfCreateResponse, + NfRetrieveResponse, + NfInitiateActionResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestNfs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create(self, client: Gradient) -> None: + nf = client.nfs.create( + name="sammy-share-drive", + region="atl1", + size_gib=1024, + vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"], + ) + assert_matches_type(NfCreateResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create(self, client: Gradient) -> None: + response = client.nfs.with_raw_response.create( + name="sammy-share-drive", + region="atl1", + size_gib=1024, + vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + nf = response.parse() + assert_matches_type(NfCreateResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create(self, client: Gradient) -> None: + with client.nfs.with_streaming_response.create( + name="sammy-share-drive", + region="atl1", + size_gib=1024, + vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + nf = response.parse() + assert_matches_type(NfCreateResponse, nf, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_retrieve(self, client: Gradient) -> None: + nf = client.nfs.retrieve( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert_matches_type(NfRetrieveResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_retrieve(self, client: 
Gradient) -> None: + response = client.nfs.with_raw_response.retrieve( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + nf = response.parse() + assert_matches_type(NfRetrieveResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_retrieve(self, client: Gradient) -> None: + with client.nfs.with_streaming_response.retrieve( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + nf = response.parse() + assert_matches_type(NfRetrieveResponse, nf, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_retrieve(self, client: Gradient) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"): + client.nfs.with_raw_response.retrieve( + nfs_id="", + region="region", + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_list(self, client: Gradient) -> None: + nf = client.nfs.list( + region="region", + ) + assert_matches_type(NfListResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_list(self, client: Gradient) -> None: + response = client.nfs.with_raw_response.list( + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + nf = response.parse() + assert_matches_type(NfListResponse, nf, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_list(self, client: Gradient) -> None: + with client.nfs.with_streaming_response.list( + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + nf = response.parse() + assert_matches_type(NfListResponse, nf, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_delete(self, client: Gradient) -> None: + nf = client.nfs.delete( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + assert nf is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_delete(self, client: Gradient) -> None: + response = client.nfs.with_raw_response.delete( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + nf = response.parse() + assert nf is None + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_delete(self, client: Gradient) -> None: + with client.nfs.with_streaming_response.delete( + nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", + region="region", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + nf = response.parse() + assert nf is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_path_params_delete(self, client: 
Gradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            client.nfs.with_raw_response.delete(
+                nfs_id="",
+                region="region",
+            )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_initiate_action_overload_1(self, client: Gradient) -> None:
+        nf = client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_initiate_action_with_all_params_overload_1(self, client: Gradient) -> None:
+        nf = client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+            params={"size_gib": 2048},
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_raw_response_initiate_action_overload_1(self, client: Gradient) -> None:
+        response = client.nfs.with_raw_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = response.parse()
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_streaming_response_initiate_action_overload_1(self, client: Gradient) -> None:
+        with client.nfs.with_streaming_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = response.parse()
+            assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_path_params_initiate_action_overload_1(self, client: Gradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            client.nfs.with_raw_response.initiate_action(
+                nfs_id="",
+                region="atl1",
+                type="resize",
+            )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_initiate_action_overload_2(self, client: Gradient) -> None:
+        nf = client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_initiate_action_with_all_params_overload_2(self, client: Gradient) -> None:
+        nf = client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+            params={"name": "daily-backup"},
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_raw_response_initiate_action_overload_2(self, client: Gradient) -> None:
+        response = client.nfs.with_raw_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = response.parse()
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_streaming_response_initiate_action_overload_2(self, client: Gradient) -> None:
+        with client.nfs.with_streaming_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = response.parse()
+            assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_path_params_initiate_action_overload_2(self, client: Gradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            client.nfs.with_raw_response.initiate_action(
+                nfs_id="",
+                region="atl1",
+                type="snapshot",
+            )
+
+
+class TestAsyncNfs:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_create(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.create(
+            name="sammy-share-drive",
+            region="atl1",
+            size_gib=1024,
+            vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+        )
+        assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.create(
+            name="sammy-share-drive",
+            region="atl1",
+            size_gib=1024,
+            vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.create(
+            name="sammy-share-drive",
+            region="atl1",
+            size_gib=1024,
+            vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.retrieve(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        )
+        assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.retrieve(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.retrieve(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            await async_client.nfs.with_raw_response.retrieve(
+                nfs_id="",
+                region="region",
+            )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_list(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.list(
+            region="region",
+        )
+        assert_matches_type(NfListResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.list(
+            region="region",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert_matches_type(NfListResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.list(
+            region="region",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert_matches_type(NfListResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.delete(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        )
+        assert nf is None
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.delete(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert nf is None
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.delete(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="region",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert nf is None
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            await async_client.nfs.with_raw_response.delete(
+                nfs_id="",
+                region="region",
+            )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_initiate_action_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+            params={"size_gib": 2048},
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="resize",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_path_params_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            await async_client.nfs.with_raw_response.initiate_action(
+                nfs_id="",
+                region="atl1",
+                type="resize",
+            )
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_method_initiate_action_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+        nf = await async_client.nfs.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+            params={"name": "daily-backup"},
+        )
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_raw_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+        response = await async_client.nfs.with_raw_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        nf = await response.parse()
+        assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_streaming_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+        async with async_client.nfs.with_streaming_response.initiate_action(
+            nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+            region="atl1",
+            type="snapshot",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            nf = await response.parse()
+            assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    async def test_path_params_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+            await async_client.nfs.with_raw_response.initiate_action(
+                nfs_id="",
+                region="atl1",
+                type="snapshot",
+            )

From b6b56c14d908d02fa092a523117d84e3d450c47f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 4 Nov 2025 06:06:21 +0000
Subject: [PATCH 193/200] chore(internal): grammar fix (it's -> its)

---
 src/gradient/_utils/_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/gradient/_utils/_utils.py b/src/gradient/_utils/_utils.py
index 50d59269..eec7f4a1 100644
--- a/src/gradient/_utils/_utils.py
+++ b/src/gradient/_utils/_utils.py
@@ -133,7 +133,7 @@ def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
 # Type safe methods for narrowing types with TypeVars.
 # The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
 # however this cause Pyright to rightfully report errors. As we know we don't
-# care about the contained types we can safely use `object` in it's place.
+# care about the contained types we can safely use `object` in its place.
 #
 # There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
 # `is_*` is for when you're dealing with an unknown input

From da10726a59069295265e339261604280302c9c80 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 11 Nov 2025 05:56:09 +0000
Subject: [PATCH 194/200] chore(package): drop Python 3.8 support

---
 README.md                    |  4 ++--
 pyproject.toml               |  5 ++---
 src/gradient/_utils/_sync.py | 34 +++-------------------------------
 3 files changed, 7 insertions(+), 36 deletions(-)

diff --git a/README.md b/README.md
index 56880ead..d9bc6f1b 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@

 [![PyPI version](https://img.shields.io/pypi/v/gradient.svg?label=pypi%20(stable))](https://pypi.org/project/gradient/)

-The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.8+
+The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.9+
 application. The library includes type definitions for all request params and response fields,
 and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).

@@ -488,7 +488,7 @@ print(gradient.__version__)

 ## Requirements

-Python 3.8 or higher.
+Python 3.9 or higher.
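+
+A quick sanity check for an installed environment, as a minimal sketch (credentials
+are assumed to come from environment variables; the base URL below reuses the Prism
+mock address from the test suite and would normally be omitted):
+
+```python
+import sys
+
+from gradient import Gradient
+
+assert sys.version_info >= (3, 9), "gradient requires Python 3.9+"
+
+client = Gradient(base_url="http://127.0.0.1:4010")  # Prism mock; omit for the real API
+print(client.nfs.list(region="atl1"))
+```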
## Contributing diff --git a/pyproject.toml b/pyproject.toml index 07e0ccc0..384e482c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,11 +15,10 @@ dependencies = [ "distro>=1.7.0, <2", "sniffio", ] -requires-python = ">= 3.8" +requires-python = ">= 3.9" classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -141,7 +140,7 @@ filterwarnings = [ # there are a couple of flags that are still disabled by # default in strict mode as they are experimental and niche. typeCheckingMode = "strict" -pythonVersion = "3.8" +pythonVersion = "3.9" exclude = [ "_dev", diff --git a/src/gradient/_utils/_sync.py b/src/gradient/_utils/_sync.py index ad7ec71b..f6027c18 100644 --- a/src/gradient/_utils/_sync.py +++ b/src/gradient/_utils/_sync.py @@ -1,10 +1,8 @@ from __future__ import annotations -import sys import asyncio import functools -import contextvars -from typing import Any, TypeVar, Callable, Awaitable +from typing import TypeVar, Callable, Awaitable from typing_extensions import ParamSpec import anyio @@ -15,34 +13,11 @@ T_ParamSpec = ParamSpec("T_ParamSpec") -if sys.version_info >= (3, 9): - _asyncio_to_thread = asyncio.to_thread -else: - # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread - # for Python 3.8 support - async def _asyncio_to_thread( - func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs - ) -> Any: - """Asynchronously run function *func* in a separate thread. - - Any *args and **kwargs supplied for this function are directly passed - to *func*. Also, the current :class:`contextvars.Context` is propagated, - allowing context variables from the main thread to be accessed in the - separate thread. - - Returns a coroutine that can be awaited to get the eventual result of *func*. - """ - loop = asyncio.events.get_running_loop() - ctx = contextvars.copy_context() - func_call = functools.partial(ctx.run, func, *args, **kwargs) - return await loop.run_in_executor(None, func_call) - - async def to_thread( func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs ) -> T_Retval: if sniffio.current_async_library() == "asyncio": - return await _asyncio_to_thread(func, *args, **kwargs) + return await asyncio.to_thread(func, *args, **kwargs) return await anyio.to_thread.run_sync( functools.partial(func, *args, **kwargs), @@ -53,10 +28,7 @@ async def to_thread( def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: """ Take a blocking function and create an async one that receives the same - positional and keyword arguments. For python version 3.9 and above, it uses - asyncio.to_thread to run the function in a separate thread. For python version - 3.8, it uses locally defined copy of the asyncio.to_thread function which was - introduced in python 3.9. + positional and keyword arguments. 
Usage: From 0e6e5d0bf9ad67dd0a040fc9066a33617d023ddb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Nov 2025 05:56:55 +0000 Subject: [PATCH 195/200] fix: compat with Python 3.14 --- src/gradient/_models.py | 11 ++++++++--- tests/test_models.py | 8 ++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/gradient/_models.py b/src/gradient/_models.py index 6a3cd1d2..fcec2cf9 100644 --- a/src/gradient/_models.py +++ b/src/gradient/_models.py @@ -2,6 +2,7 @@ import os import inspect +import weakref from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( @@ -573,6 +574,9 @@ class CachedDiscriminatorType(Protocol): __discriminator__: DiscriminatorDetails +DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary() + + class DiscriminatorDetails: field_name: str """The name of the discriminator field in the variant class, e.g. @@ -615,8 +619,9 @@ def __init__( def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: - if isinstance(union, CachedDiscriminatorType): - return union.__discriminator__ + cached = DISCRIMINATOR_CACHE.get(union) + if cached is not None: + return cached discriminator_field_name: str | None = None @@ -669,7 +674,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, discriminator_field=discriminator_field_name, discriminator_alias=discriminator_alias, ) - cast(CachedDiscriminatorType, union).__discriminator__ = details + DISCRIMINATOR_CACHE.setdefault(union, details) return details diff --git a/tests/test_models.py b/tests/test_models.py index de5ef465..ba635571 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -9,7 +9,7 @@ from gradient._utils import PropertyInfo from gradient._compat import PYDANTIC_V1, parse_obj, model_dump, model_json -from gradient._models import BaseModel, construct_type +from gradient._models import DISCRIMINATOR_CACHE, BaseModel, construct_type class BasicModel(BaseModel): @@ -809,7 +809,7 @@ class B(BaseModel): UnionType = cast(Any, Union[A, B]) - assert not hasattr(UnionType, "__discriminator__") + assert not DISCRIMINATOR_CACHE.get(UnionType) m = construct_type( value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")]) @@ -818,7 +818,7 @@ class B(BaseModel): assert m.type == "b" assert m.data == "foo" # type: ignore[comparison-overlap] - discriminator = UnionType.__discriminator__ + discriminator = DISCRIMINATOR_CACHE.get(UnionType) assert discriminator is not None m = construct_type( @@ -830,7 +830,7 @@ class B(BaseModel): # if the discriminator details object stays the same between invocations then # we hit the cache - assert UnionType.__discriminator__ is discriminator + assert DISCRIMINATOR_CACHE.get(UnionType) is discriminator @pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1") From f84c3ed965a61514f4ee7549c7ccbc4558c4d97c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Nov 2025 05:31:18 +0000 Subject: [PATCH 196/200] fix(compat): update signatures of `model_dump` and `model_dump_json` for Pydantic v1 --- src/gradient/_models.py | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git 
From f84c3ed965a61514f4ee7549c7ccbc4558c4d97c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 12 Nov 2025 05:31:18 +0000
Subject: [PATCH 196/200] fix(compat): update signatures of `model_dump` and `model_dump_json` for Pydantic v1

---
 src/gradient/_models.py | 41 +++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/src/gradient/_models.py b/src/gradient/_models.py
index fcec2cf9..ca9500b2 100644
--- a/src/gradient/_models.py
+++ b/src/gradient/_models.py
@@ -257,15 +257,16 @@ def model_dump(
         mode: Literal["json", "python"] | str = "python",
         include: IncEx | None = None,
         exclude: IncEx | None = None,
+        context: Any | None = None,
         by_alias: bool | None = None,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,
         exclude_none: bool = False,
+        exclude_computed_fields: bool = False,
         round_trip: bool = False,
         warnings: bool | Literal["none", "warn", "error"] = True,
-        context: dict[str, Any] | None = None,
-        serialize_as_any: bool = False,
         fallback: Callable[[Any], Any] | None = None,
+        serialize_as_any: bool = False,
     ) -> dict[str, Any]:
         """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -273,16 +274,24 @@ def model_dump(

         Args:
             mode: The mode in which `to_python` should run.
-                If mode is 'json', the dictionary will only contain JSON serializable types.
-                If mode is 'python', the dictionary may contain any Python objects.
-            include: A list of fields to include in the output.
-            exclude: A list of fields to exclude from the output.
+                If mode is 'json', the output will only contain JSON serializable types.
+                If mode is 'python', the output may contain non-JSON-serializable Python objects.
+            include: A set of fields to include in the output.
+            exclude: A set of fields to exclude from the output.
+            context: Additional context to pass to the serializer.
             by_alias: Whether to use the field's alias in the dictionary key if defined.
-            exclude_unset: Whether to exclude fields that are unset or None from the output.
-            exclude_defaults: Whether to exclude fields that are set to their default value from the output.
-            exclude_none: Whether to exclude fields that have a value of `None` from the output.
-            round_trip: Whether to enable serialization and deserialization round-trip support.
-            warnings: Whether to log warnings when invalid fields are encountered.
+            exclude_unset: Whether to exclude fields that have not been explicitly set.
+            exclude_defaults: Whether to exclude fields that are set to their default value.
+            exclude_none: Whether to exclude fields that have a value of `None`.
+            exclude_computed_fields: Whether to exclude computed fields.
+                While this can be useful for round-tripping, it is usually recommended to use the dedicated
+                `round_trip` parameter instead.
+            round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
+            warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
+                "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+            fallback: A function to call when an unknown value is encountered. If not provided,
+                a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+            serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.

         Returns:
             A dictionary representation of the model.
@@ -299,6 +308,8 @@ def model_dump(
                 raise ValueError("serialize_as_any is only supported in Pydantic v2")
             if fallback is not None:
                 raise ValueError("fallback is only supported in Pydantic v2")
+            if exclude_computed_fields != False:
+                raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
             dumped = super().dict(  # pyright: ignore[reportDeprecated]
                 include=include,
                 exclude=exclude,
@@ -315,15 +326,17 @@ def model_dump_json(
         self,
         *,
         indent: int | None = None,
+        ensure_ascii: bool = False,
         include: IncEx | None = None,
         exclude: IncEx | None = None,
+        context: Any | None = None,
         by_alias: bool | None = None,
         exclude_unset: bool = False,
         exclude_defaults: bool = False,
         exclude_none: bool = False,
+        exclude_computed_fields: bool = False,
         round_trip: bool = False,
         warnings: bool | Literal["none", "warn", "error"] = True,
-        context: dict[str, Any] | None = None,
         fallback: Callable[[Any], Any] | None = None,
         serialize_as_any: bool = False,
     ) -> str:
@@ -355,6 +368,10 @@ def model_dump_json(
                 raise ValueError("serialize_as_any is only supported in Pydantic v2")
             if fallback is not None:
                 raise ValueError("fallback is only supported in Pydantic v2")
+            if ensure_ascii != False:
+                raise ValueError("ensure_ascii is only supported in Pydantic v2")
+            if exclude_computed_fields != False:
+                raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
             return super().json(  # type: ignore[reportDeprecated]
                 indent=indent,
                 include=include,
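PATCH 196 keeps the Pydantic v1 fallback implementations of `model_dump` and `model_dump_json` signature-compatible with their v2 counterparts: newer keywords such as `context`, `exclude_computed_fields`, and `ensure_ascii` are accepted so one call shape works everywhere, but any non-default value is rejected with a `ValueError` when only v1 is installed, since v1's `.dict()`/`.json()` cannot honor them. A condensed sketch of that accept-then-guard pattern, with illustrative names (`PYDANTIC_V1` stands in for the flag imported from `gradient._compat`, and the v2 branch assumes a Pydantic release recent enough to support `ensure_ascii`):

    from typing import Any

    PYDANTIC_V1 = True  # assume a Pydantic v1 environment for this sketch

    def dump_json(model: Any, *, indent: int | None = None, ensure_ascii: bool = False) -> str:
        if PYDANTIC_V1:
            # Accept the v2-era keyword for signature parity, but refuse any
            # non-default value the v1 serializer cannot honor.
            if ensure_ascii:
                raise ValueError("ensure_ascii is only supported in Pydantic v2")
            return model.json(indent=indent)  # deprecated v1 API, still correct under v1
        return model.model_dump_json(indent=indent, ensure_ascii=ensure_ascii)

One small nit in the patch itself: the guards compare with `!= False` where `is not False` or a plain truthiness check would be more idiomatic Python, though the behavior is equivalent for `bool` arguments.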
From a5a87b71df8812f872916dea5bc407aae985915c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 19 Nov 2025 17:55:19 +0000
Subject: [PATCH 197/200] chore(internal): version bump

---
 .release-please-manifest.json | 2 +-
 pyproject.toml                | 2 +-
 src/gradient/_version.py      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index f391d416..27d2fbbc 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "3.6.0"
+  ".": "3.7.0"
 }
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 384e482c..8abecd07 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "gradient"
-version = "3.6.0"
+version = "3.7.0"
 description = "The official Python library for the Gradient API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index 0190d688..6e29a000 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "gradient"
-__version__ = "3.6.0"  # x-release-please-version
+__version__ = "3.7.0"  # x-release-please-version

From 8f29c502ccf2963ff92ff0e27c7b6e9cac647362 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 20 Nov 2025 19:35:34 +0000
Subject: [PATCH 198/200] feat(api): manual updates

---
 .stats.yml                                    |   2 +-
 api.md                                        |  12 -
 .../agents/evaluation_metrics/__init__.py     |  14 -
 .../evaluation_metrics/evaluation_metrics.py  |  32 ---
 .../agents/evaluation_metrics/models.py       | 254 ------------------
 .../agents/evaluation_metrics/__init__.py     |   2 -
 .../evaluation_metrics/model_list_params.py   |  42 ---
 .../evaluation_metrics/model_list_response.py |  21 --
 .../agents/evaluation_metrics/test_models.py  | 102 -------
 9 files changed, 1 insertion(+), 480 deletions(-)
 delete mode 100644 src/gradient/resources/agents/evaluation_metrics/models.py
 delete mode 100644 src/gradient/types/agents/evaluation_metrics/model_list_params.py
 delete mode 100644 src/gradient/types/agents/evaluation_metrics/model_list_response.py
 delete mode 100644 tests/api_resources/agents/evaluation_metrics/test_models.py

diff --git a/.stats.yml b/.stats.yml
index 425463f6..29f00c95 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 188
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-f07d74847e620dfa26d8df40ea4680814af9bba381b3a57a7b6ed76ad49d85f8.yml
 openapi_spec_hash: e3553dc2abf2afd4368b736bcc32a289
-config_hash: b712366a70c9d33e22d40eb601ca972f
+config_hash: b28984dd49d4baf1d68572efe83ac103
diff --git a/api.md b/api.md
index 49135772..e32fae32 100644
--- a/api.md
+++ b/api.md
@@ -155,18 +155,6 @@ Methods:
 - client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
 - client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse

-### Models
-
-Types:
-
-```python
-from gradient.types.agents.evaluation_metrics import ModelListResponse
-```
-
-Methods:
-
-- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
-
 ### Anthropic

 #### Keys
diff --git a/src/gradient/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py
index 515a221b..fcb54c78 100644
--- a/src/gradient/resources/agents/evaluation_metrics/__init__.py
+++ b/src/gradient/resources/agents/evaluation_metrics/__init__.py
@@ -1,13 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .models import (
-    ModelsResource,
-    AsyncModelsResource,
-    ModelsResourceWithRawResponse,
-    AsyncModelsResourceWithRawResponse,
-    ModelsResourceWithStreamingResponse,
-    AsyncModelsResourceWithStreamingResponse,
-)
 from .oauth2 import (
     Oauth2Resource,
     AsyncOauth2Resource,
@@ -64,12 +56,6 @@
     "AsyncWorkspacesResourceWithRawResponse",
     "WorkspacesResourceWithStreamingResponse",
     "AsyncWorkspacesResourceWithStreamingResponse",
-    "ModelsResource",
-    "AsyncModelsResource",
-    "ModelsResourceWithRawResponse",
-    "AsyncModelsResourceWithRawResponse",
-    "ModelsResourceWithStreamingResponse",
-    "AsyncModelsResourceWithStreamingResponse",
     "AnthropicResource",
     "AsyncAnthropicResource",
     "AnthropicResourceWithRawResponse",
diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
index 14ea4d55..b9080132 100644
--- a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
+++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
@@ -4,14 +4,6 @@

 import httpx

-from .models import (
-    ModelsResource,
-    AsyncModelsResource,
-    ModelsResourceWithRawResponse,
-    AsyncModelsResourceWithRawResponse,
-    ModelsResourceWithStreamingResponse,
-    AsyncModelsResourceWithStreamingResponse,
-)
 from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
 from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
@@ -75,10 +67,6 @@ class EvaluationMetricsResource(SyncAPIResource):
     def workspaces(self) -> WorkspacesResource:
         return WorkspacesResource(self._client)

-    @cached_property
-    def models(self) -> ModelsResource:
-        return ModelsResource(self._client)
-
     @cached_property
     def anthropic(self) -> AnthropicResource:
         return AnthropicResource(self._client)
@@ -192,10 +180,6 @@ class AsyncEvaluationMetricsResource(AsyncAPIResource):
     def workspaces(self) -> AsyncWorkspacesResource:
         return AsyncWorkspacesResource(self._client)

-    @cached_property
-    def models(self) -> AsyncModelsResource:
-        return AsyncModelsResource(self._client)
-
     @cached_property
     def anthropic(self) -> AsyncAnthropicResource:
         return AsyncAnthropicResource(self._client)
@@ -319,10 +303,6 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
     def workspaces(self) -> WorkspacesResourceWithRawResponse:
         return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)

-    @cached_property
-    def models(self) -> ModelsResourceWithRawResponse:
-        return ModelsResourceWithRawResponse(self._evaluation_metrics.models)
-
     @cached_property
     def anthropic(self) -> AnthropicResourceWithRawResponse:
         return AnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
@@ -355,10 +335,6 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
     def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse:
         return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)

-    @cached_property
-    def models(self) -> AsyncModelsResourceWithRawResponse:
-        return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models)
-
     @cached_property
     def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
         return AsyncAnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
@@ -391,10 +367,6 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
     def workspaces(self) -> WorkspacesResourceWithStreamingResponse:
         return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)

-    @cached_property
-    def models(self) -> ModelsResourceWithStreamingResponse:
-        return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models)
-
     @cached_property
     def anthropic(self) -> AnthropicResourceWithStreamingResponse:
         return AnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
@@ -427,10 +399,6 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
     def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse:
         return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)

-    @cached_property
-    def models(self) -> AsyncModelsResourceWithStreamingResponse:
-        return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models)
-
     @cached_property
     def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
         return AsyncAnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
diff --git a/src/gradient/resources/agents/evaluation_metrics/models.py b/src/gradient/resources/agents/evaluation_metrics/models.py
deleted file mode 100644
index 7728e662..00000000
--- a/src/gradient/resources/agents/evaluation_metrics/models.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal
-
-import httpx
-
-from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
-    to_raw_response_wrapper,
-    to_streamed_response_wrapper,
-    async_to_raw_response_wrapper,
-    async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.agents.evaluation_metrics import model_list_params
-from ....types.agents.evaluation_metrics.model_list_response import ModelListResponse
-
-__all__ = ["ModelsResource", "AsyncModelsResource"]
-
-
-class ModelsResource(SyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> ModelsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
-        """
-        return ModelsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
-        """
-        return ModelsResourceWithStreamingResponse(self)
-
-    def list(
-        self,
-        *,
-        page: int | Omit = omit,
-        per_page: int | Omit = omit,
-        public_only: bool | Omit = omit,
-        usecases: List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-        | Omit = omit,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> ModelListResponse:
-        """
-        To list all models, send a GET request to `/v2/gen-ai/models`.
-
-        Args:
-          page: Page number.
-
-          per_page: Items per page.
-
-          public_only: Only include models that are publicly available.
-
-          usecases: Include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model maybe used in an agent
-              - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
-              - MODEL_USECASE_REASONING: The model usecase for reasoning
-              - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return self._get(
-            "/v2/gen-ai/models"
-            if self._client._base_url_overridden
-            else "https://api.digitalocean.com/v2/gen-ai/models",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    model_list_params.ModelListParams,
-                ),
-            ),
-            cast_to=ModelListResponse,
-        )
-
-
-class AsyncModelsResource(AsyncAPIResource):
-    @cached_property
-    def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
-        """
-        This property can be used as a prefix for any HTTP method call to return
-        the raw response object instead of the parsed content.
-
-        For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
-        """
-        return AsyncModelsResourceWithRawResponse(self)
-
-    @cached_property
-    def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
-        """
-        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
-        For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
-        """
-        return AsyncModelsResourceWithStreamingResponse(self)
-
-    async def list(
-        self,
-        *,
-        page: int | Omit = omit,
-        per_page: int | Omit = omit,
-        public_only: bool | Omit = omit,
-        usecases: List[
-            Literal[
-                "MODEL_USECASE_UNKNOWN",
-                "MODEL_USECASE_AGENT",
-                "MODEL_USECASE_FINETUNED",
-                "MODEL_USECASE_KNOWLEDGEBASE",
-                "MODEL_USECASE_GUARDRAIL",
-                "MODEL_USECASE_REASONING",
-                "MODEL_USECASE_SERVERLESS",
-            ]
-        ]
-        | Omit = omit,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> ModelListResponse:
-        """
-        To list all models, send a GET request to `/v2/gen-ai/models`.
-
-        Args:
-          page: Page number.
-
-          per_page: Items per page.
-
-          public_only: Only include models that are publicly available.
-
-          usecases: Include only models defined for the listed usecases.
-
-              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-              - MODEL_USECASE_AGENT: The model maybe used in an agent
-              - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
-              - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
-                (embedding models)
-              - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
-              - MODEL_USECASE_REASONING: The model usecase for reasoning
-              - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
-          extra_headers: Send extra headers
-
-          extra_query: Add additional query parameters to the request
-
-          extra_body: Add additional JSON properties to the request
-
-          timeout: Override the client-level default timeout for this request, in seconds
-        """
-        return await self._get(
-            "/v2/gen-ai/models"
-            if self._client._base_url_overridden
-            else "https://api.digitalocean.com/v2/gen-ai/models",
-            options=make_request_options(
-                extra_headers=extra_headers,
-                extra_query=extra_query,
-                extra_body=extra_body,
-                timeout=timeout,
-                query=await async_maybe_transform(
-                    {
-                        "page": page,
-                        "per_page": per_page,
-                        "public_only": public_only,
-                        "usecases": usecases,
-                    },
-                    model_list_params.ModelListParams,
-                ),
-            ),
-            cast_to=ModelListResponse,
-        )
-
-
-class ModelsResourceWithRawResponse:
-    def __init__(self, models: ModelsResource) -> None:
-        self._models = models
-
-        self.list = to_raw_response_wrapper(
-            models.list,
-        )
-
-
-class AsyncModelsResourceWithRawResponse:
-    def __init__(self, models: AsyncModelsResource) -> None:
-        self._models = models
-
-        self.list = async_to_raw_response_wrapper(
-            models.list,
-        )
-
-
-class ModelsResourceWithStreamingResponse:
-    def __init__(self, models: ModelsResource) -> None:
-        self._models = models
-
-        self.list = to_streamed_response_wrapper(
-            models.list,
-        )
-
-
-class AsyncModelsResourceWithStreamingResponse:
-    def __init__(self, models: AsyncModelsResource) -> None:
-        self._models = models
-
-        self.list = async_to_streamed_response_wrapper(
-            models.list,
-        )
diff --git a/src/gradient/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py
index 135d4ac9..f74dce5c 100644
--- a/src/gradient/types/agents/evaluation_metrics/__init__.py
+++ b/src/gradient/types/agents/evaluation_metrics/__init__.py
@@ -2,8 +2,6 @@

 from __future__ import annotations

-from .model_list_params import ModelListParams as ModelListParams
-from .model_list_response import ModelListResponse as ModelListResponse
 from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams
 from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse
 from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams
diff --git a/src/gradient/types/agents/evaluation_metrics/model_list_params.py b/src/gradient/types/agents/evaluation_metrics/model_list_params.py
deleted file mode 100644
index a2fa066a..00000000
--- a/src/gradient/types/agents/evaluation_metrics/model_list_params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["ModelListParams"]
-
-
-class ModelListParams(TypedDict, total=False):
-    page: int
-    """Page number."""
-
-    per_page: int
-    """Items per page."""
-
-    public_only: bool
-    """Only include models that are publicly available."""
-
-    usecases: List[
-        Literal[
-            "MODEL_USECASE_UNKNOWN",
-            "MODEL_USECASE_AGENT",
-            "MODEL_USECASE_FINETUNED",
-            "MODEL_USECASE_KNOWLEDGEBASE",
-            "MODEL_USECASE_GUARDRAIL",
-            "MODEL_USECASE_REASONING",
-            "MODEL_USECASE_SERVERLESS",
-        ]
-    ]
-    """Include only models defined for the listed usecases.
-
-    - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
-    - MODEL_USECASE_AGENT: The model maybe used in an agent
-    - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
-    - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
-      (embedding models)
-    - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
-    - MODEL_USECASE_REASONING: The model usecase for reasoning
-    - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-    """
diff --git a/src/gradient/types/agents/evaluation_metrics/model_list_response.py b/src/gradient/types/agents/evaluation_metrics/model_list_response.py
deleted file mode 100644
index 2fc17524..00000000
--- a/src/gradient/types/agents/evaluation_metrics/model_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...api_model import APIModel
-from ...shared.api_meta import APIMeta
-from ...shared.api_links import APILinks
-
-__all__ = ["ModelListResponse"]
-
-
-class ModelListResponse(BaseModel):
-    links: Optional[APILinks] = None
-    """Links to other pages"""
-
-    meta: Optional[APIMeta] = None
-    """Meta information about the data set"""
-
-    models: Optional[List[APIModel]] = None
-    """The models"""
diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py
deleted file mode 100644
index 677b3383..00000000
--- a/tests/api_resources/agents/evaluation_metrics/test_models.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from gradient import Gradient, AsyncGradient
-from tests.utils import assert_matches_type
-from gradient.types.agents.evaluation_metrics import ModelListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModels:
-    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    def test_method_list(self, client: Gradient) -> None:
-        model = client.agents.evaluation_metrics.models.list()
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    def test_method_list_with_all_params(self, client: Gradient) -> None:
-        model = client.agents.evaluation_metrics.models.list(
-            page=0,
-            per_page=0,
-            public_only=True,
-            usecases=["MODEL_USECASE_UNKNOWN"],
-        )
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    def test_raw_response_list(self, client: Gradient) -> None:
-        response = client.agents.evaluation_metrics.models.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = response.parse()
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    def test_streaming_response_list(self, client: Gradient) -> None:
-        with client.agents.evaluation_metrics.models.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = response.parse()
-            assert_matches_type(ModelListResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModels:
-    parametrize = pytest.mark.parametrize(
-        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
-    )
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    async def test_method_list(self, async_client: AsyncGradient) -> None:
-        model = await async_client.agents.evaluation_metrics.models.list()
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
-        model = await async_client.agents.evaluation_metrics.models.list(
-            page=0,
-            per_page=0,
-            public_only=True,
-            usecases=["MODEL_USECASE_UNKNOWN"],
-        )
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
-        response = await async_client.agents.evaluation_metrics.models.with_raw_response.list()
-
-        assert response.is_closed is True
-        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-        model = await response.parse()
-        assert_matches_type(ModelListResponse, model, path=["response"])
-
-    @pytest.mark.skip(reason="Prism tests are disabled")
-    @parametrize
-    async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
-        async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            model = await response.parse()
-            assert_matches_type(ModelListResponse, model, path=["response"])
-
-        assert cast(Any, response.is_closed) is True

From 16103e7301a460f7953ce1a4a9efc44643146cf5 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 20 Nov 2025 20:43:02 +0000
Subject: [PATCH 199/200] chore(internal): version bump

---
 .release-please-manifest.json | 2 +-
 pyproject.toml                | 2 +-
 src/gradient/_version.py      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 27d2fbbc..078b9e28 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "3.7.0"
+  ".": "3.8.0"
 }
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 8abecd07..fc881d12 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "gradient"
-version = "3.7.0"
+version = "3.8.0"
 description = "The official Python library for the Gradient API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index 6e29a000..defad636 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "gradient"
-__version__ = "3.7.0"  # x-release-please-version
+__version__ = "3.8.0"  # x-release-please-version

From cbb4d32811e1446820c6a6a8620b85d887b7ae3d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 22 Nov 2025 04:57:13 +0000
Subject: [PATCH 200/200] chore: add Python 3.14 classifier and testing

---
 pyproject.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pyproject.toml b/pyproject.toml
index fc881d12..b1014da9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,7 @@ classifiers = [
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Operating System :: OS Independent",
     "Operating System :: POSIX",
     "Operating System :: MacOS",
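The diffstat of PATCH 200 shows only pyproject.toml changing, so the "testing" half of the subject is not visible in this excerpt. As a purely hypothetical sketch of the usual companion change, a nox session (the repository ships a noxfile.py and a requirements-dev.lock) would extend its interpreter list with the new version; the session name and version list below are assumptions, not the project's actual configuration:

    import nox

    # Hypothetical matrix: extends the tested interpreters with 3.14.
    @nox.session(python=["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"])
    def test(session: nox.Session) -> None:
        session.install("-r", "requirements-dev.lock")
        session.run("pytest", *session.posargs)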